Merge "clk: qcom: mdss: remove DSI1 PLL configuration from DSI0 PLL" into msm-4.8
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index fd8fda9..498ae1d 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -6,6 +6,7 @@
 
 Required properties
 - compatible: Must be "qcom,sde-kms"
+- compatible: "msm-hdmi-audio-codec-rx";
 - reg: Offset and length of the register set for the device.
 - reg-names : Names to refer to register sets related to this device
 - clocks: List of Phandles for clock device nodes
@@ -57,6 +58,8 @@
 - qcom,sde-pp-off:		Array of offset addresses for the available
 				pingpong blocks. These offsets are calculated
 				from register "mdp_phys" defined in reg property.
+- qcom,sde-pp-slave:		Array of flags indicating whether each ping pong
+				block may be configured as a pp slave.
 - qcom,sde-intf-off:		Array of offset addresses for the available SDE
 				interface blocks that can drive data to a
 				panel controller. The offsets are calculated
@@ -106,6 +109,9 @@
 				It supports "qssedv3" and "qseedv2" entries for qseed
 				type. By default "qseedv2" is used if this optional property
 				is not defined.
+- qcom,sde-csc-type:		A string entry indicating the csc support on sspp and wb.
+				It supports "csc" and "csc-10bit" entries for csc
+				type.
 - qcom,sde-highest-bank-bit:	A u32 property to indicate GPU/Camera/Video highest memory
 				bank bit used for tile format buffers.
 - qcom,sde-panic-per-pipe:	Boolean property to indicate if panic signal
@@ -134,12 +140,6 @@
 				indicating the danger luts on sspp.
 - qcom,sde-sspp-safe-lut:	A 3 cell property, with a format of <linear, tile, nrt>,
 				indicating the safe luts on sspp.
-- qcom,sde-sspp-qseed-off:	A u32 offset value indicates the qseed block offset
-				from sspp base. It will install qseed property on
-				vig and rgb sspp pipes.
-- qcom,sde-sspp-csc-off:	A u32 offset value indicates the csc block offset
-				from sspp base. It will be used to install the csc
-				property on vig type pipe.
 - qcom,sde-sspp-max-rects:	Array of u32 values indicating maximum rectangles supported
 				on each sspp. This property is for multirect feature support.
 				Number of offsets defined should match the number of
@@ -165,14 +165,48 @@
 - qcom,sde-te-size:		A u32 value indicates the te block address range.
 - qcom,sde-te2-size:		A u32 value indicates the te2 block address range.
 - qcom,sde-dsc-off:	 	A u32 offset indicates the dsc block offset on pingpong.
-- qcom,sde-dspp-igc-off:	A u32 offset indicates the igc block offset on dssp.
-- qcom,sde-dspp-pcc-off:	A u32 offset indicates the pcc block offset on dssp.
-- qcom,sde-dspp-gc-off:		A u32 offset indicates the gc block offset on dssp.
-- qcom,sde-dspp-pa-off:		A u32 offset indicates the pa block offset on dssp.
-- qcom,sde-dspp-gamut-off:	A u32 offset indicates the gamut block offset on dssp.
-- qcom,sde-dspp-dither-off:	A u32 offset indicates the dither block offset on dssp.
-- qcom,sde-dspp-hist-off:	A u32 offset indicates the hist block offset on dssp.
-- qcom,sde-dspp-ad-off:		A u32 offset indicates the ad block offset on dssp.
+- qcom,sde-sspp-vig-blocks:	A node that lists the blocks inside the VIG hardware. The
+				block entries will contain the offset and version (if needed)
+				of each feature block. The presence of a block entry
+				indicates that the SSPP VIG contains that feature hardware.
+				e.g. qcom,sde-sspp-vig-blocks
+				-- qcom,sde-vig-csc-off: offset of CSC hardware
+				-- qcom,sde-vig-qseed-off: offset of QSEED hardware
+				-- qcom,sde-vig-pcc: offset and version of PCC hardware
+				-- qcom,sde-vig-hsic: offset and version of global PA adjustment
+				-- qcom,sde-vig-memcolor: offset and version of PA memcolor hardware
+- qcom,sde-sspp-rgb-blocks:	A node that lists the blocks inside the RGB hardware. The
+				block entries will contain the offset and version (if needed)
+				of each feature block. The presence of a block entry
+				indicates that the SSPP RGB contains that feature hardware.
+				e.g. qcom,sde-sspp-rgb-blocks
+				-- qcom,sde-rgb-scaler-off: offset of RGB scaler hardware
+				-- qcom,sde-rgb-pcc: offset and version of PCC hardware
+- qcom,sde-dspp-blocks:		A node that lists the blocks inside the DSPP hardware. The
+				block entries will contain the offset and version of each
+				feature block. The presence of a block entry indicates that
+				the DSPP contains that feature hardware.
+				e.g. qcom,sde-dspp-blocks
+				-- qcom,sde-dspp-pcc: offset and version of PCC hardware
+				-- qcom,sde-dspp-gc: offset and version of GC hardware
+				-- qcom,sde-dspp-hsic: offset and version of global PA adjustment
+				-- qcom,sde-dspp-memcolor: offset and version of PA memcolor hardware
+				-- qcom,sde-dspp-sixzone: offset and version of PA sixzone hardware
+				-- qcom,sde-dspp-gamut: offset and version of Gamut mapping hardware
+				-- qcom,sde-dspp-dither: offset and version of dither hardware
+				-- qcom,sde-dspp-hist: offset and version of histogram hardware
+				-- qcom,sde-dspp-vlut: offset and version of PA vLUT hardware
+- qcom,sde-mixer-blocks:	A node that lists the blocks inside the layer mixer hardware. The
+				block entries will contain the offset and version (if needed)
+				of each feature block. The presence of a block entry
+				indicates that the layer mixer contains that feature hardware.
+				e.g. qcom,sde-mixer-blocks
+				-- qcom,sde-mixer-gc: offset and version of mixer GC hardware
+- qcom,sde-dspp-ad-off:		Array of u32 offsets indicating the ad block offset from the
+				DSPP offset. Since the AD hardware is represented as part of
+				the DSPP block, the AD offsets must be specified relative to the
+				corresponding DSPP base.
+- qcom,sde-dspp-ad-version:	A u32 value indicating the version of the AD hardware
 - qcom,sde-vbif-id:		Array of vbif ids corresponding to the
 				offsets defined in property: qcom,sde-vbif-off.
 - qcom,sde-vbif-default-ot-rd-limit:	A u32 value indicates the default read OT limit
@@ -259,6 +293,8 @@
     qcom,sde-mixer-off = <0x00045000 0x00046000
 			0x00047000 0x0004a000>;
     qcom,sde-dspp-off = <0x00055000 0x00057000>;
+    qcom,sde-dspp-ad-off = <0x24000 0x22800>;
+    qcom,sde-dspp-ad-version = <0x00030000>;
     qcom,sde-wb-off = <0x00066000>;
     qcom,sde-wb-xin-id = <6>;
     qcom,sde-intf-off = <0x0006b000 0x0006b800
@@ -266,6 +302,7 @@
     qcom,sde-intf-type = "none", "dsi", "dsi", "hdmi";
     qcom,sde-pp-off = <0x00071000 0x00071800
 			  0x00072000 0x00072800>;
+    qcom,sde-pp-slave = <0x0 0x0 0x0 0x0>;
     qcom,sde-cdm-off = <0x0007a200>;
     qcom,sde-dsc-off = <0x00081000 0x00081400>;
     qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
@@ -315,6 +352,7 @@
     qcom,sde-sspp-scale-size = <0x100>;
     qcom,sde-mixer-blendstages = <0x8>;
     qcom,sde-qseed-type = "qseedv2";
+    qcom,sde-csc-type = "csc-10bit";
     qcom,sde-highest-bank-bit = <15>;
     qcom,sde-has-mixer-gc;
     qcom,sde-sspp-max-rects = <1 1 1 1
@@ -325,16 +363,6 @@
     qcom,sde-te2-off = <0x100>;
     qcom,sde-te-size = <0xffff>;
     qcom,sde-te2-size = <0xffff>;
-    qcom,sde-sspp-qseed-off = <0x100>;
-    qcom,sde-sspp-csc-off = <0x100>;
-    qcom,sde-dspp-igc-off = <0x100>;
-    qcom,sde-dspp-pcc-off = <0x100>;
-    qcom,sde-dspp-gc-off = <0x100>;
-    qcom,sde-dspp-pa-off = <0x100>;
-    qcom,sde-dspp-gamut-off = <0x100>;
-    qcom,sde-dspp-dither-off = <0x100>;
-    qcom,sde-dspp-hist-off = <0x100>;
-    qcom,sde-dspp-ad-off = <0x100>;
 
     qcom,sde-wb-id = <2>;
     qcom,sde-wb-clk-ctrl = <0x2bc 16>;
@@ -351,6 +379,40 @@
     qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
         <124416000 4>, <248832000 16>;
 
+    qcom,sde-sspp-vig-blocks {
+        qcom,sde-vig-csc-off = <0x320>;
+        qcom,sde-vig-qseed-off = <0x200>;
+        /* Offset from vig top, version of HSIC */
+        qcom,sde-vig-hsic = <0x200 0x00010000>;
+        qcom,sde-vig-memcolor = <0x200 0x00010000>;
+        qcom,sde-vig-pcc = <0x1780 0x00010000>;
+    };
+
+    qcom,sde-sspp-rgb-blocks {
+        qcom,sde-rgb-scaler-off = <0x200>;
+        qcom,sde-rgb-pcc = <0x380 0x00010000>;
+    };
+
+    qcom,sde-dspp-blocks {
+        qcom,sde-dspp-pcc = <0x1700 0x00010000>;
+        qcom,sde-dspp-gc = <0x17c0 0x00010000>;
+        qcom,sde-dspp-hsic = <0x0 0x00010000>;
+        qcom,sde-dspp-memcolor = <0x0 0x00010000>;
+        qcom,sde-dspp-sixzone = <0x0 0x00010000>;
+        qcom,sde-dspp-gamut = <0x1600 0x00010000>;
+        qcom,sde-dspp-dither = <0x0 0x00010000>;
+        qcom,sde-dspp-hist = <0x0 0x00010000>;
+        qcom,sde-dspp-vlut = <0x0 0x00010000>;
+    };
+
+    qcom,sde-mixer-blocks {
+        qcom,sde-mixer-gc = <0x3c0 0x00010000>;
+    };
+
+    qcom,msm-hdmi-audio-rx {
+        compatible = "qcom,msm-hdmi-audio-codec-rx";
+    };
+
     qcom,platform-supply-entries {
        #address-cells = <1>;
        #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
index 6be5332..ea35d14 100644
--- a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
@@ -62,7 +62,6 @@
 - qcom,dsi-phy: handle to dsi phy device
 - qcom,dsi-manager:       Specifies dsi manager is present
 - qcom,dsi-display:       Specifies dsi display is present
-- qcom,display-manager:   Specifies display manager is present
 - qcom,hdmi-display:      Specifies hdmi is present
 - qcom,dp-display:        Specifies dp is present
 - qcom,<type>-supply-entries:		A node that lists the elements of the supply used by the
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
new file mode 100644
index 0000000..608b426
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -0,0 +1,742 @@
+Qualcomm mdss-dsi-panel
+
+mdss-dsi-panel is a dsi panel device which supports panels that
+are compatible with MIPI display serial interface specification.
+
+Required properties:
+- compatible:				This property applies to DSI V2 panels only.
+					This property should not be added for panels
+					that work based on version "V6.0"; DSI panels
+					of different versions are initialized by the
+					drivers for the dsi controller.
+					This property specifies the version of DSI HW
+					that this panel will work with.
+					"qcom,dsi-panel-v2" = DSI V2.0
+- status:        			This property applies to DSI V2 panels only.
+					This property should not be added for panels
+					that work based on version "V6.0"; DSI panels
+					of different versions are initialized by the
+					drivers for the dsi controller.
+					A string that has to be set to "okay"/"ok"
+					to enable the panel driver. By default this
+					property is set to "disable" and will be set
+					to "ok"/"okay" for specific platforms.
+- qcom,mdss-dsi-panel-controller:	Specifies the phandle for the DSI controller that
+					this panel will be mapped to.
+- qcom,mdss-dsi-panel-width:		Specifies panel width in pixels.
+- qcom,mdss-dsi-panel-height:		Specifies panel height in pixels.
+- qcom,mdss-dsi-bpp:			Specifies the panel bits per pixel.
+					3  = for rgb111
+					8  = for rgb332
+					12 = for rgb444
+					16 = for rgb565
+					18 = for rgb666
+					24 = for rgb888
+- qcom,mdss-dsi-panel-destination:	A string that specifies the destination display for the panel.
+					"display_1" = DISPLAY_1
+					"display_2" = DISPLAY_2
+- qcom,mdss-dsi-panel-timings:		An array of length 12 that specifies the PHY
+					timing settings for the panel.
+- qcom,mdss-dsi-panel-timings-8996:		An array of 40 bytes that specifies the 8996 PHY lane
+					timing settings for the panel.
+- qcom,mdss-dsi-on-command:		A byte stream formed by multiple dcs packets based on
+					the qcom dsi controller protocol.
+					byte 0: dcs data type
+					byte 1: set to indicate this is an individual packet
+						 (no chain)
+					byte 2: virtual channel number
+					byte 3: expect ack from client (dcs read command)
+					byte 4: wait the specified number of ms after the dcs
+						 command is transmitted
+					byte 5, 6: 16 bit length in network byte order
+					byte 7 and beyond: payload bytes
+- qcom,mdss-dsi-off-command:		A byte stream formed by multiple dcs packets based on
+					the qcom dsi controller protocol.
+					byte 0: dcs data type
+					byte 1: set to indicate this is an individual packet
+						 (no chain)
+					byte 2: virtual channel number
+					byte 3: expect ack from client (dcs read command)
+					byte 4: wait the specified number of ms after the dcs
+						 command is transmitted
+					byte 5, 6: 16 bit length in network byte order
+					byte 7 and beyond: payload bytes
+- qcom,mdss-dsi-post-panel-on-command:	same as "qcom,mdss-dsi-on-command" except commands are
+					sent after displaying an image.
+
+Note: if a short DCS packet (i.e. a packet with Byte 0: dcs data type = 05) mentioned in
+the qcom,mdss-dsi-on-command/qcom,mdss-dsi-off-command stream fails to transmit,
+then 3 options can be tried.
+	1. Send the packet as a long packet instead
+				Byte 0: dcs data type = 05 (DCS short Packet)
+				Byte 0: dcs data type = 29 (DCS long Packet)
+	2. Send the packet in one burst by prepending with the next packet in packet stream
+				Byte 1 = 01 (indicates this is an individual packet)
+				Byte 1 = 00 (indicates this will be appended to the next
+					     individual packet in the packet stream)
+	3. Prepend a NULL packet to the short packet and send both in one burst instead of
+	   combining multiple short packets and sending them in one burst.
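+
+For illustration, the second packet of the qcom,mdss-dsi-on-command entry in the
+example at the end of this document, "29 01 00 00 10 00 02 FF 99", decodes per the
+format above as:
+				Byte 0: 29    = dcs data type (DCS long packet)
+				Byte 1: 01    = individual packet (no chain)
+				Byte 2: 00    = virtual channel 0
+				Byte 3: 00    = no ack expected from the client
+				Byte 4: 10    = wait 0x10 (16) ms after the command is transmitted
+				Byte 5, 6: 00 02 = payload length of 2, in network byte order
+				Byte 7 and beyond: FF 99 = payload bytes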
+
+Optional properties:
+- qcom,mdss-dsi-panel-name:		A string used as a descriptive name of the panel
+- qcom,cmd-sync-wait-broadcast:		Boolean used to broadcast dcs command to panels.
+- qcom,mdss-dsi-fbc-enable:		Boolean used to enable frame buffer compression mode.
+- qcom,mdss-dsi-fbc-slice-height:	Slice height (in lines) of a compressed block.
+					Expressed as a power of 2. To set it to 128 lines,
+					this should be set to 7.
+- qcom,mdss-dsi-fbc-2d-pred-mode:	Boolean to enable 2D map prediction.
+- qcom,mdss-dsi-fbc-ver2-mode:		Boolean to enable FBC 2.0 that supports 1/3
+					compression.
+- qcom,mdss-dsi-fbc-bpp:		Compressed bpp supported by the panel.
+					Specified color order is used as default value.
+- qcom,mdss-dsi-fbc-packing:		Component packing.
+					0 = default value.
+- qcom,mdss-dsi-fbc-quant-error:	Boolean used to enable quantization error calculation.
+- qcom,mdss-dsi-fbc-bias:		Bias for CD.
+					0 = default value.
+- qcom,mdss-dsi-fbc-pat-mode:		Boolean used to enable PAT mode.
+- qcom,mdss-dsi-fbc-vlc-mode:		Boolean used to enable VLC mode.
+- qcom,mdss-dsi-fbc-bflc-mode:		Boolean used to enable BFLC mode.
+- qcom,mdss-dsi-fbc-h-line-budget:	Per line extra budget.
+					0 = default value.
+- qcom,mdss-dsi-fbc-budget-ctrl:		Extra budget level.
+					0 = default value.
+- qcom,mdss-dsi-fbc-block-budget:		Per block budget.
+					0 = default value.
+- qcom,mdss-dsi-fbc-lossless-threshold: Lossless mode threshold.
+					0 = default value.
+- qcom,mdss-dsi-fbc-lossy-threshold:	Lossy mode threshold.
+					0 = default value.
+- qcom,mdss-dsi-fbc-rgb-threshold:	Lossy RGB threshold.
+					0 = default value.
+- qcom,mdss-dsi-fbc-lossy-mode-idx:	Lossy mode index value.
+					0 = default value.
+- qcom,mdss-dsi-fbc-max-pred-err:	Max quantization prediction error.
+					0 = default value
+- qcom,mdss-dsi-h-back-porch:		Horizontal back porch value in pixel.
+					6 = default value.
+- qcom,mdss-dsi-h-front-porch:		Horizontal front porch value in pixel.
+					6 = default value.
+- qcom,mdss-dsi-h-pulse-width:		Horizontal pulse width.
+					2 = default value.
+- qcom,mdss-dsi-h-sync-skew:		Horizontal sync skew value.
+					0 = default value.
+- qcom,mdss-dsi-v-back-porch:		Vertical back porch value in pixel.
+					6 = default value.
+- qcom,mdss-dsi-v-front-porch:		Vertical front porch value in pixel.
+					6 = default value.
+- qcom,mdss-dsi-v-pulse-width:		Vertical pulse width.
+					2 = default value.
+- qcom,mdss-dsi-h-left-border:		Horizontal left border in pixel.
+					0 = default value
+- qcom,mdss-dsi-h-right-border:		Horizontal right border in pixel.
+					0 = default value
+- qcom,mdss-dsi-v-top-border:		Vertical top border in pixel.
+					0 = default value
+- qcom,mdss-dsi-v-bottom-border:	Vertical bottom border in pixel.
+					0 = default value
+- qcom,mdss-dsi-underflow-color:	Specifies the controller settings for the
+					panel underflow color.
+					0xff = default value.
+- qcom,mdss-dsi-border-color:		Defines the border color value if border is present.
+					0 = default value.
+- qcom,mdss-dsi-pan-enable-dynamic-fps:	Boolean used to enable change in frame rate dynamically.
+- qcom,mdss-dsi-pan-fps-update:		A string that specifies when to change the frame rate.
+					"dfps_suspend_resume_mode"= FPS change request is
+					implemented during suspend/resume.
+					"dfps_immediate_clk_mode" = FPS change request is
+					implemented immediately using DSI clocks.
+					"dfps_immediate_porch_mode_hfp" = FPS change request is
+					implemented immediately by changing panel horizontal
+					front porch values.
+					"dfps_immediate_porch_mode_vfp" = FPS change request is
+					implemented immediately by changing panel vertical
+					front porch values.
+- qcom,min-refresh-rate:		Minimum refresh rate supported by the panel.
+- qcom,max-refresh-rate:		Maximum refresh rate supported by the panel. If max refresh
+					rate is not specified, then the frame rate of the panel in
+					qcom,mdss-dsi-panel-framerate is used.
+- qcom,mdss-dsi-bl-pmic-control-type:	A string that specifies the implementation of backlight
+					control for this panel.
+					"bl_ctrl_pwm" = Backlight controlled by PWM gpio.
+					"bl_ctrl_wled" = Backlight controlled by WLED.
+					"bl_ctrl_dcs" = Backlight controlled by DCS commands.
+					other: Unknown backlight control. (default)
+- qcom,mdss-dsi-bl-pwm-pmi:		Boolean to indicate that PWM control is through the second pmic chip.
+- qcom,mdss-dsi-bl-pmic-bank-select:	LPG channel for backlight.
+					Required if the bl-pmic-control-type is PWM.
+- qcom,mdss-dsi-bl-pmic-pwm-frequency:	PWM period in microseconds.
+					Required if the bl-pmic-control-type is PWM.
+- qcom,mdss-dsi-pwm-gpio:		PMIC gpio binding to backlight.
+					Required if the bl-pmic-control-type is PWM.
+- qcom,mdss-dsi-bl-min-level:		Specifies the min backlight level supported by the panel.
+					0 = default value.
+- qcom,mdss-dsi-bl-max-level:		Specifies the max backlight level supported by the panel.
+					255 = default value.
+- qcom,mdss-brightness-max-level:	Specifies the max brightness level supported.
+					255 = default value.
+- qcom,mdss-dsi-interleave-mode:	Specifies interleave mode.
+					0 = default value.
+- qcom,mdss-dsi-panel-type:		Specifies the panel operating mode.
+					"dsi_video_mode" = enable video mode (default).
+					"dsi_cmd_mode" = enable command mode.
+- qcom,5v-boost-gpio:			Specifies the panel gpio for display 5v boost.
+- qcom,mdss-dsi-te-check-enable:	Boolean to enable Tear Check configuration.
+- qcom,mdss-dsi-te-using-te-pin:	Boolean to specify whether using hardware vsync.
+- qcom,mdss-dsi-te-pin-select:		Specifies TE operating mode.
+					0 = TE through embedded dcs command
+					1 = TE through TE gpio pin. (default)
+- qcom,mdss-dsi-te-dcs-command:		Inserts the dcs command.
+					1 = default value.
+- qcom,mdss-dsi-wr-mem-start:		DCS command for write_memory_start.
+					0x2c = default value.
+- qcom,mdss-dsi-wr-mem-continue:	DCS command for write_memory_continue.
+					0x3c = default value.
+- qcom,mdss-dsi-h-sync-pulse:		Specifies the pulse mode option for the panel.
+					0 = Don't send hsa/he following vs/ve packet(default)
+					1 = Send hsa/he following vs/ve packet
+- qcom,mdss-dsi-hfp-power-mode:		Boolean to determine DSI lane state during
+					horizontal front porch (HFP) blanking period.
+- qcom,mdss-dsi-hbp-power-mode:		Boolean to determine DSI lane state during
+					horizontal back porch (HBP) blanking period.
+- qcom,mdss-dsi-hsa-power-mode:		Boolean to determine DSI lane state during
+					horizontal sync active (HSA) mode.
+- qcom,mdss-dsi-last-line-interleave:	Boolean to determine if last line
+					interleave flag needs to be enabled.
+- qcom,mdss-dsi-bllp-eof-power-mode:	Boolean to determine DSI lane state during
+					blanking low power period (BLLP) EOF mode.
+- qcom,mdss-dsi-bllp-power-mode:	Boolean to determine DSI lane state during
+					blanking low power period (BLLP) mode.
+- qcom,mdss-dsi-traffic-mode:		Specifies the panel traffic mode.
+					"non_burst_sync_pulse" = non burst with sync pulses (default).
+					"non_burst_sync_event" = non burst with sync start event.
+					"burst_mode" = burst mode.
+- qcom,mdss-dsi-pixel-packing:		Specifies if pixel packing is used (in case of RGB666).
+					"tight" = Tight packing (default value).
+					"loose" = Loose packing.
+- qcom,mdss-dsi-virtual-channel-id:	Specifies the virtual channel identifier.
+					0 = default value.
+- qcom,mdss-dsi-color-order:		Specifies the R, G and B channel ordering.
+					"rgb_swap_rgb" = DSI_RGB_SWAP_RGB (default value)
+					"rgb_swap_rbg" = DSI_RGB_SWAP_RBG
+					"rgb_swap_brg" = DSI_RGB_SWAP_BRG
+					"rgb_swap_grb" = DSI_RGB_SWAP_GRB
+					"rgb_swap_gbr" = DSI_RGB_SWAP_GBR
+- qcom,mdss-dsi-lane-0-state:		Boolean that specifies whether data lane 0 is enabled.
+- qcom,mdss-dsi-lane-1-state:		Boolean that specifies whether data lane 1 is enabled.
+- qcom,mdss-dsi-lane-2-state:		Boolean that specifies whether data lane 2 is enabled.
+- qcom,mdss-dsi-lane-3-state:		Boolean that specifies whether data lane 3 is enabled.
+- qcom,mdss-dsi-t-clk-post:		Specifies the byte clock cycles after mode switch.
+					0x03 = default value.
+- qcom,mdss-dsi-t-clk-pre:		Specifies the byte clock cycles before mode switch.
+					0x24 = default value.
+- qcom,mdss-dsi-stream:			Specifies the packet stream to be used.
+					0 = stream 0 (default)
+					1 = stream 1
+- qcom,mdss-dsi-mdp-trigger:		Specifies the trigger mechanism to be used for MDP path.
+					"none" = no trigger
+					"trigger_te" = Tear check signal line used for trigger
+					"trigger_sw" = Triggered by software (default)
+					"trigger_sw_te" = Software trigger and TE
+- qcom,mdss-dsi-dma-trigger:		Specifies the trigger mechanism to be used for DMA path.
+					"none" = no trigger
+					"trigger_te" = Tear check signal line used for trigger
+					"trigger_sw" = Triggered by software (default)
+					"trigger_sw_seof" = Software trigger and start/end of frame trigger.
+					"trigger_sw_te" = Software trigger and TE
+- qcom,mdss-dsi-panel-framerate:	Specifies the frame rate for the panel.
+					60 = 60 frames per second (default)
+- qcom,mdss-dsi-panel-clockrate:	A 64 bit value specifies the panel clock speed in Hz.
+					0 = default value.
+- qcom,mdss-mdp-transfer-time-us:	Specifies the dsi transfer time for command mode
+					panels in microseconds. Driver uses this number to adjust
+					the clock rate according to the expected transfer time.
+					Increasing this value would slow down the mdp processing
+					and can result in slower performance.
+					Decreasing this value can speed up the mdp processing,
+					but this can also impact power consumption.
+					As a rule this time should not be higher than the time
+					that would be expected with processing at the
+					dsi link rate, since that is the maximum
+					transfer time that could be achieved.
+					If ping pong split is enabled, this time should not be
+					higher than two times the dsi link rate time.
+					14000 = default value.
+- qcom,mdss-dsi-on-command-state:	String that specifies the ctrl state for sending ON commands.
+					"dsi_lp_mode" = DSI low power mode (default)
+					"dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-off-command-state:	String that specifies the ctrl state for sending OFF commands.
+					"dsi_lp_mode" = DSI low power mode (default)
+					"dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-post-mode-switch-on-command-state:	String that specifies the ctrl state for sending ON commands post mode switch.
+					"dsi_lp_mode" = DSI low power mode (default)
+					"dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-pan-physical-width-dimension:	Specifies panel physical width in mm which corresponds
+					to the physical width in the framebuffer information.
+- qcom,mdss-pan-physical-height-dimension:	Specifies panel physical height in mm which corresponds
+					to the physical height in the framebuffer information.
+- qcom,mdss-dsi-mode-sel-gpio-state:	String that specifies the lcd mode for panel
+					(such as single-port/dual-port), if qcom,panel-mode-gpio
+					binding is defined in dsi controller.
+					"dual_port" = Set GPIO to LOW
+					"single_port" = Set GPIO to HIGH
+					"high" = Set GPIO to HIGH
+					"low" = Set GPIO to LOW
+					The default value is "dual_port".
+- qcom,mdss-tear-check-disable:		Boolean to disable mdp tear check. Tear check is enabled by default to avoid
+					tearing. Other tear-check properties are ignored if this property is present.
+					The below tear check configuration properties can be individually tuned if
+					tear check is enabled.
+- qcom,mdss-tear-check-sync-cfg-height: Specifies the vertical total number of lines.
+					The default value is 0xfff0.
+- qcom,mdss-tear-check-sync-init-val:	Specifies the init value at which the read pointer gets loaded
+					at the vsync edge. The read pointer refers to the line number of
+					the panel buffer that is currently being updated.
+					The default value is the panel height.
+- qcom,mdss-tear-check-sync-threshold-start:
+					Allows the first ROI line write to a panel when the read pointer
+					is in the range between the ROI start line and the ROI start line
+					plus this setting.
+					The default value is 4.
+- qcom,mdss-tear-check-sync-threshold-continue:
+					The minimum number of lines the write pointer needs to be
+					above the read pointer so that it is safe to write to the panel.
+					(This check is not done for the first ROI line write of an update)
+					The default value is 4.
+- qcom,mdss-tear-check-start-pos:	Specify the y position from which the start_threshold value is
+					added and write is kicked off if the read pointer falls within that
+					region.
+					The default value is panel height.
+- qcom,mdss-tear-check-rd-ptr-trigger-intr:
+					Specify the read pointer value at which an interrupt has to be
+					generated.
+					The default value is panel height + 1.
+- qcom,mdss-tear-check-frame-rate:	Specifies the value as the real frame rate (fps) x 100, to tune the
+					timing of TE simulation with more precision.
+					The default value is 6000, corresponding to 60 fps.
+- qcom,mdss-dsi-reset-sequence:		An array that lists the
+					sequence of reset gpio values and sleeps.
+					Each entry has the format defined
+					as below:
+					--> Reset GPIO value
+					--> Sleep value (in ms)
+- qcom,partial-update-enabled:		Boolean used to enable partial
+					panel update for command mode panels.
+- qcom,mdss-dsi-horizontal-line-idle:	List of width ranges (EC - SC) in pixels indicating
+					additional idle time in dsi clock cycles that is needed
+					to compensate for smaller line width.
+- qcom,partial-update-roi-merge:	Boolean indicating that roi combination is needed
+					and that a function has been provided for the dcs
+					2A/2B command.
+- qcom,dcs-cmd-by-left:			Boolean to indicate that dcs commands are sent
+					through the left DSI controller only in a dual-dsi configuration.
+- qcom,mdss-dsi-lp11-init:		Boolean used to enable the DSI clocks and data lanes (low power 11)
+					before issuing hardware reset line.
+- qcom,mdss-dsi-init-delay-us:		Delay in microseconds (us) before performing any DSI activity in lp11
+					mode. This master delay (t_init_delay as per the DSI spec) should be the
+					sum of the DSI internal delay to become functional after power up and the
+					minimum delay required by the panel to become functional.
+- qcom,mdss-dsi-rx-eot-ignore:		Boolean used to enable ignoring end of transmission packets.
+- qcom,mdss-dsi-tx-eot-append:		Boolean used to enable appending end of transmission packets.
+- qcom,ulps-enabled:			Boolean to enable support for Ultra Low Power State (ULPS) mode.
+- qcom,suspend-ulps-enabled:		Boolean to enable support for ULPS mode for panels during suspend state.
+- qcom,panel-roi-alignment:		Specifies the panel ROI alignment restrictions on its
+					left, top, width, height alignments and minimum width and
+					height values
+- qcom,esd-check-enabled:		Boolean used to enable ESD recovery feature.
+- qcom,mdss-dsi-panel-status-command:	A byte stream formed by multiple dcs packets based on
+					qcom dsi controller protocol, to read the panel status.
+					This value is used to kick in the ESD recovery.
+					byte 0: dcs data type
+					byte 1: set to indicate this is an individual packet
+						 (no chain)
+					byte 2: virtual channel number
+					byte 3: expect ack from client (dcs read command)
+					byte 4: wait the specified number of ms after the dcs
+						 command is transmitted
+					byte 5, 6: 16 bit length in network byte order
+					byte 7 and beyond: payload bytes
+- qcom,mdss-dsi-panel-status-command-mode:
+					String that specifies the ctrl state for reading the panel status.
+					"dsi_lp_mode" = DSI low power mode
+					"dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-panel-status-check-mode: Specifies the panel status check method for ESD recovery.
+					"bta_check" = Uses BTA to check the panel status
+					"reg_read" = Reads panel status register to check the panel status
+					"reg_read_nt35596" = Reads panel status register to check the panel
+							     status for NT35596 panel.
+					"te_signal_check" = Uses TE signal behaviour to check the panel status
+- qcom,mdss-dsi-panel-status-read-length: Integer array that specifies the expected read-back length of values
+					  for each of the panel registers. Each length corresponds to the number
+					  of parameters returned by the register, as introduced in the specification.
+- qcom,mdss-dsi-panel-status-valid-params: Integer array that specifies the number of valid returned values that
+					   need to be checked for each register.
+					   Some panels only need the first few values returned from the panel
+					   to be checked. If this property is the same as
+					   qcom,mdss-dsi-panel-status-read-length, it can simply be omitted.
+- qcom,mdss-dsi-panel-status-value:	Multiple integer arrays, each specifying the values of the panel status
+					register used to check the panel status. The size of each array must equal
+					the sum of the lengths specified in qcom,mdss-dsi-panel-status-read-length.
+					This covers panels that may return several alternative values.
+- qcom,mdss-dsi-panel-max-error-count:  Integer value that specifies the maximum number of errors from register
+					reads that can be ignored before treating the panel as having gone bad.
+- qcom,dynamic-mode-switch-enabled:		Boolean used to indicate whether the panel supports
+					dynamic switching from video mode to command mode
+					and vice versa.
+- qcom,dynamic-mode-switch-type:		A string that specifies how to perform the dynamic mode switch.
+						If qcom,dynamic-mode-switch-enabled is set and no string is specified,
+						the default value is dynamic-switch-suspend-resume.
+					"dynamic-switch-suspend-resume"= Switch using suspend/resume. Panel will
+						go blank during transition.
+					"dynamic-switch-immediate"= Switch on next frame update. Panel will
+						not go blank for this transition.
+					"dynamic-resolution-switch-immediate"= Switch the panel resolution. Panel will
+						not go blank for this transition.
+- qcom,mdss-dsi-post-mode-switch-on-command:		Multiple dcs packets used for turning on the DSI panel
+					after the panel has switched modes.
+					Refer to "qcom,mdss-dsi-on-command" section for adding commands.
+- qcom,video-to-cmd-mode-switch-commands:	List of commands that need to be sent
+					to panel in order to switch from video mode to command mode dynamically.
+					Refer to "qcom,mdss-dsi-on-command" section for adding commands.
+- qcom,cmd-to-video-mode-switch-commands:	List of commands that need to be sent
+					to panel in order to switch from command mode to video mode dynamically.
+					Refer to "qcom,mdss-dsi-on-command" section for adding commands.
+- qcom,send-pps-before-switch:		Boolean property to indicate when PPS commands should be sent,
+					either before or after switch commands during dynamic resolution
+					switch in DSC panels. If the property is not present, the default
+					behavior is to send PPS commands after the switch commands.
+- qcom,mdss-dsi-panel-orientation:	String used to indicate orientation of panel
+					"180" = panel is flipped in both horizontal and vertical directions
+					"hflip" = panel is flipped in horizontal direction
+					"vflip" = panel is flipped in vertical direction
+- qcom,panel-ack-disabled: A boolean property to indicate whether we need to wait for an ACK from the panel
+			   for any commands that we send.
+- qcom,mdss-dsi-force-clock-lane-hs:	Boolean to force dsi clock lanes to HS mode always.
+
+- qcom,compression-mode:		Select compression mode for panel.
+					"fbc" - frame buffer compression
+					"dsc" - display stream compression.
+					If "dsc" compression is used then config subnodes need to be defined.
+- qcom,panel-supply-entries:		A node that lists the elements of the supply used to
+					power the DSI panel. There can be more than one instance
+					of this binding, in which case the entry would be appended
+					with the supply entry index. For a detailed description of
+					fields in the supply entry, refer to the qcom,ctrl-supply-entries
+					binding above.
+- qcom,config-select:			Optional property to select default configuration.
+
+[[Optional config sub-nodes]]		These subnodes provide different configurations for a given panel.
+					Default configuration can be chosen by specifying phandle of the
+					selected subnode in the qcom,config-select.
+Required properties for sub-nodes:	None
+Optional properties:
+- qcom,lm-split:			An array of two values indicating that MDP should use two layer
+					mixers to reduce power.
+					Ex: Normally a 1080x1920 display uses a single DSI and thus one layer
+					    mixer. But if we use two layer mixers, mux the output of
+					    those two mixers into a single stream and route it to a single
+					    DSI, then we can lower the clock requirements of MDP. To use this
+					    configuration we need to fill this array with <540 540>.
+					Both values do not have to be the same (though that is recommended);
+					however, the sum of both values has to be equal to the panel width.
+					By default the two mixer streams are merged using a 2D mux; however,
+					if 2 DSC encoders are used then the merge is performed within the
+					compression engine.
+- qcom,split-mode:			String property indicating which split mode MDP should use. Valid
+					entries are "pingpong-split" and "dualctl-split".
+					This property is mutually exclusive with qcom,lm-split.
+- qcom,mdss-dsc-version:		An 8 bit value indicating the DSC version supported by the panel. Bits[0-3]
+					provide the minor version while Bits[4-7] provide the
+					major version. Only DSC rev 1 (major).1 (minor) is supported
+					right now.
+- qcom,mdss-dsc-scr-version:		Each DSC version can have multiple SCR revisions. This 8 bit value
+					indicates the current SCR revision supported by the panel.
+- qcom,mdss-dsc-encoders:		An integer value indicating how many DSC encoders should be used
+					to drive data stream to DSI.
+					Default value is 1 and max value is 2.
+					2 encoders should be used only if qcom,lm-split or
+					qcom,split-mode with pingpong-split is used.
+- qcom,mdss-dsc-slice-height:		An integer value indicates the dsc slice height.
+- qcom,mdss-dsc-slice-width:		An integer value indicates the dsc slice width.
+					Multiple of slice width should be equal to panel-width.
+					Maximum 2 slices per DSC encoder can be used so if 2 DSC encoders
+					are used then minimum slice width is equal to panel-width/4.
+- qcom,mdss-dsc-slice-per-pkt:		An integer value indicates the slice per dsi packet.
+- qcom,mdss-dsc-bit-per-component: 	An integer value indicates the bits per component before compression.
+- qcom,mdss-dsc-bit-per-pixel:		An integer value indicates the bits per pixel after compression.
+- qcom,mdss-dsc-block-prediction-enable: A boolean value to enable/disable the block prediction at decoder.
+- qcom,mdss-dsc-config-by-manufacture-cmd: A boolean indicating that the panel uses a manufacturer command to set
+					up pps instead of the standard dcs type 0x0A.
+- qcom,dba-panel:	Indicates whether the current panel is used as a display bridge
+					to a non-DSI interface.
+- qcom,bridge-name:			A string to indicate the name of the bridge chip connected to DSI. qcom,bridge-name
+					is required if qcom,dba-panel is defined for the panel.
+- qcom,adjust-timer-wakeup-ms:		An integer value to indicate the timer delay(in ms) to accommodate
+					s/w delay while configuring the event timer wakeup logic.
+
+- qcom,mdss-dsi-display-timings:	Parent node that lists the different resolutions that the panel supports.
+					Each child represents timings settings for a specific resolution.
+- qcom,mdss-dsi-post-init-delay:        Specifies the required number of frames to wait so that the panel can
+					become functional and show a proper display.
+
+Additional properties added to the second level nodes that represent timings properties:
+- qcom,mdss-dsi-timing-default:		Property that specifies the current child as the default
+					timing configuration that will be used.
+- qcom,mdss-dsi-timing-switch-command:	List of commands that need to be sent
+					to panel when the resolution/timing switch happens dynamically.
+					Refer to "qcom,mdss-dsi-on-command" section for adding commands.
+- qcom,mdss-dsi-timing-switch-command-state:	String that specifies the ctrl state for sending resolution switch
+					commands.
+					"dsi_lp_mode" = DSI low power mode (default)
+					"dsi_hs_mode" = DSI high speed mode
+
+Note, if a given optional qcom,* binding is not present, then the driver will configure
+the default values specified.
+
+Example:
+&mdss_mdp {
+	dsi_sim_vid: qcom,mdss_dsi_sim_video {
+		qcom,mdss-dsi-panel-name = "simulator video mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-height = <1280>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-pixel-packing = <0>;
+		qcom,mdss-dsi-panel-destination = "display_1";
+		qcom,cmd-sync-wait-broadcast;
+		qcom,mdss-dsi-fbc-enable;
+		qcom,mdss-dsi-fbc-slice-height = <5>;
+		qcom,mdss-dsi-fbc-2d-pred-mode;
+		qcom,mdss-dsi-fbc-ver2-mode;
+		qcom,mdss-dsi-fbc-bpp = <0>;
+		qcom,mdss-dsi-fbc-packing = <0>;
+		qcom,mdss-dsi-fbc-quant-error;
+		qcom,mdss-dsi-fbc-bias = <0>;
+		qcom,mdss-dsi-fbc-pat-mode;
+		qcom,mdss-dsi-fbc-vlc-mode;
+		qcom,mdss-dsi-fbc-bflc-mode;
+		qcom,mdss-dsi-fbc-h-line-budget = <0>;
+		qcom,mdss-dsi-fbc-budget-ctrl = <0>;
+		qcom,mdss-dsi-fbc-block-budget = <0>;
+		qcom,mdss-dsi-fbc-lossless-threshold = <0>;
+		qcom,mdss-dsi-fbc-lossy-threshold = <0>;
+		qcom,mdss-dsi-fbc-rgb-threshold = <0>;
+		qcom,mdss-dsi-fbc-lossy-mode-idx = <0>;
+		qcom,mdss-dsi-fbc-max-pred-err = <2>;
+		qcom,mdss-dsi-h-front-porch = <140>;
+		qcom,mdss-dsi-h-back-porch = <164>;
+		qcom,mdss-dsi-h-pulse-width = <8>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <6>;
+		qcom,mdss-dsi-v-front-porch = <1>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <15>;
+		qcom,mdss-brightness-max-level = <255>;
+		qcom,mdss-dsi-interleave-mode = <0>;
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-h-sync-pulse = <1>;
+		qcom,mdss-dsi-hfp-power-mode;
+		qcom,mdss-dsi-hbp-power-mode;
+		qcom,mdss-dsi-hsa-power-mode;
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-last-line-interleave;
+		qcom,mdss-dsi-traffic-mode = <0>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-color-order = <0>;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-t-clk-post = <0x20>;
+		qcom,mdss-dsi-t-clk-pre = <0x2c>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-mdp-trigger = <0>;
+		qcom,mdss-dsi-dma-trigger = <0>;
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-panel-clockrate = <424000000>;
+		qcom,mdss-mdp-transfer-time-us = <12500>;
+		qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
+					22 27 1e 03 04 00];
+                qcom,mdss-dsi-panel-timings-8996 = [23 20 06 09 05 03 04 a0
+                                23 20 06 09 05 03 04 a0
+                                23 20 06 09 05 03 04 a0
+                                23 20 06 09 05 03 04 a0
+                                23 2e 06 08 05 03 04 a0];
+		qcom,mdss-dsi-on-command = [32 01 00 00 00 00 02 00 00
+					29 01 00 00 10 00 02 FF 99];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command = [22 01 00 00 00 00 00];
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-pan-enable-dynamic-fps;
+		qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
+		qcom,min-refresh-rate = <30>;
+		qcom,max-refresh-rate = <60>;
+		qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+		qcom,mdss-dsi-bl-pmic-pwm-frequency = <0>;
+		qcom,mdss-dsi-pwm-gpio = <&pm8941_mpps 5 0>;
+		qcom,5v-boost-gpio = <&pm8994_gpios 14 0>;
+		qcom,mdss-pan-physical-width-dimension = <60>;
+		qcom,mdss-pan-physical-height-dimension = <140>;
+		qcom,mdss-dsi-mode-sel-gpio-state = "dsc_mode";
+		qcom,mdss-tear-check-sync-cfg-height = <0xfff0>;
+		qcom,mdss-tear-check-sync-init-val = <1280>;
+		qcom,mdss-tear-check-sync-threshold-start = <4>;
+		qcom,mdss-tear-check-sync-threshold-continue = <4>;
+		qcom,mdss-tear-check-start-pos = <1280>;
+		qcom,mdss-tear-check-rd-ptr-trigger-intr = <1281>;
+		qcom,mdss-tear-check-frame-rate = <6000>;
+		qcom,mdss-dsi-reset-sequence = <1 2>, <0 10>, <1 10>;
+		qcom,partial-update-enabled;
+		qcom,dcs-cmd-by-left;
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-init-delay-us = <100>;
+		qcom,mdss-dsi-rx-eot-ignore;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,ulps-enabled;
+		qcom,suspend-ulps-enabled;
+		qcom,panel-roi-alignment = <4 4 2 2 20 20>;
+		qcom,esd-check-enabled;
+		qcom,mdss-dsi-panel-status-command = [06 01 00 01 05 00 02 0A 08];
+		qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+		qcom,mdss-dsi-panel-status-read-length = <8>;
+		qcom,mdss-dsi-panel-max-error-count = <3>;
+		qcom,mdss-dsi-panel-status-value = <0x1c 0x00 0x05 0x02 0x40 0x84 0x06 0x01>;
+		qcom,dynamic-mode-switch-enabled;
+		qcom,dynamic-mode-switch-type = "dynamic-switch-immediate";
+		qcom,mdss-dsi-post-mode-switch-on-command = [32 01 00 00 00 00 02 00 00
+					29 01 00 00 10 00 02 B0 03];
+		qcom,video-to-cmd-mode-switch-commands = [15 01 00 00 00 00 02 C2 0B
+						15 01 00 00 00 00 02 C2 08];
+		qcom,cmd-to-video-mode-switch-commands = [15 01 00 00 00 00 02 C2 03];
+		qcom,send-pps-before-switch;
+		qcom,panel-ack-disabled;
+		qcom,mdss-dsi-horizontal-line-idle = <0 40 256>,
+						<40 120 128>,
+						<128 240 64>;
+		qcom,mdss-dsi-panel-orientation = "180";
+		qcom,mdss-dsi-force-clock-lane-hs;
+		qcom,compression-mode = "dsc";
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-display-timings {
+			wqhd {
+				qcom,mdss-dsi-timing-default;
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <20>;
+				qcom,mdss-dsi-h-back-porch = <8>;
+				qcom,mdss-dsi-h-pulse-width = <8>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <4>;
+				qcom,mdss-dsi-v-front-porch = <728>;
+				qcom,mdss-dsi-v-pulse-width = <4>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-timings = [E6 38 26 00 68 6E 2A 3C 2C 03 04 00];
+				qcom,mdss-dsi-t-clk-post = <0x02>;
+				qcom,mdss-dsi-t-clk-pre = <0x2a>;
+				qcom,mdss-dsi-on-command = [05 01 00 00 a0 00 02 11 00
+					05 01 00 00 02 00 02 29 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-timing-switch-command = [
+					29 00 00 00 00 00 02 B0 04
+					29 00 00 00 00 00 02 F1 00];
+				qcom,mdss-dsi-timing-switch-command-state = "dsi_lp_mode";
+
+				qcom,config-select = <&dsi_sim_vid_config0>;
+				dsi_sim_vid_config0: config0 {
+					qcom,lm-split = <360 360>;
+					qcom,mdss-dsc-encoders = <2>;
+					qcom,mdss-dsc-slice-height = <16>;
+					qcom,mdss-dsc-slice-width = <360>;
+					qcom,mdss-dsc-slice-per-pkt = <2>;
+					qcom,mdss-dsc-bit-per-component = <8>;
+					qcom,mdss-dsc-bit-per-pixel = <8>;
+					qcom,mdss-dsc-block-prediction-enable;
+					qcom,mdss-dsc-config-by-manufacture-cmd;
+				};
+			};
+		};
+		qcom,panel-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,panel-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdd";
+				qcom,supply-min-voltage = <2800000>;
+				qcom,supply-max-voltage = <2800000>;
+				qcom,supply-enable-load = <100000>;
+				qcom,supply-disable-load = <100>;
+				qcom,supply-pre-on-sleep = <0>;
+				qcom,supply-post-on-sleep = <0>;
+				qcom,supply-pre-off-sleep = <0>;
+				qcom,supply-post-off-sleep = <0>;
+			};
+
+			qcom,panel-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vddio";
+				qcom,supply-min-voltage = <1800000>;
+				qcom,supply-max-voltage = <1800000>;
+				qcom,supply-enable-load = <100000>;
+				qcom,supply-disable-load = <100>;
+				qcom,supply-pre-on-sleep = <0>;
+				qcom,supply-post-on-sleep = <0>;
+				qcom,supply-pre-off-sleep = <0>;
+				qcom,supply-post-off-sleep = <0>;
+			};
+		};
+
+		qcom,config-select = <&dsi_sim_vid_config0>;
+		qcom,dba-panel;
+		qcom,bridge-name = "adv7533";
+		qcom,mdss-dsc-version = <0x11>;
+		qcom,mdss-dsc-scr-version = <0x1>;
+
+		dsi_sim_vid_config0: config0 {
+			qcom,lm-split = <360 360>;
+			qcom,mdss-dsc-encoders = <2>;
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <360>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+			qcom,mdss-dsc-config-by-manufacture-cmd;
+		};
+
+		dsi_sim_vid_config1: config1 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <360>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+			qcom,mdss-dsc-config-by-manufacture-cmd;
+		};
+
+		dsi_sim_vid_config2: config2 {
+			qcom,split-mode = "dualctl-split";
+		};
+
+		dsi_sim_vid_config3: config3 {
+			qcom,split-mode = "pingpong-split";
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/regulator/proxy-consumer.txt b/Documentation/devicetree/bindings/regulator/proxy-consumer.txt
new file mode 100644
index 0000000..c3fddd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/proxy-consumer.txt
@@ -0,0 +1,32 @@
+Regulator Proxy Consumer Bindings
+
+Regulator proxy consumers provide a means to use a default regulator state
+during bootup only which is removed at the end of boot.  This feature can be
+used in situations where a shared regulator can be scaled between several
+possible voltages and hardware requires that it be at a high level at the
+beginning of boot before the consumer device responsible for requesting the
+high level has probed.
+
+Optional properties:
+proxy-supply:			phandle of the regulator's own device node.
+				This property is required if any of the three
+				properties below are specified.
+qcom,proxy-consumer-enable:	Boolean indicating that the regulator must be
+				kept enabled during boot.
+qcom,proxy-consumer-voltage:	List of two integers corresponding to the minimum
+				and maximum voltage allowed during boot in
+				microvolts.
+qcom,proxy-consumer-current:	Minimum current in microamps required during
+				boot.
+
+Example:
+
+	foo_vreg: regulator@0 {
+		regulator-name = "foo";
+		regulator-min-microvolt = <1000000>;
+		regulator-max-microvolt = <2000000>;
+		proxy-supply = <&foo_vreg>;
+		qcom,proxy-consumer-voltage = <1500000 2000000>;
+		qcom,proxy-consumer-current = <25000>;
+		qcom,proxy-consumer-enable;
+	};
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
index e16b9b5..bef9193 100644
--- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
@@ -42,7 +42,7 @@
     cell 4: interrupt flags indicating level-sense information, as defined in
             dt-bindings/interrupt-controller/irq.h
 
-Example:
+Example V1 PMIC-Arbiter:
 
 	spmi {
 		compatible = "qcom,spmi-pmic-arb";
@@ -63,3 +63,26 @@
 		interrupt-controller;
 		#interrupt-cells = <4>;
 	};
+
+Example V2 PMIC-Arbiter:
+
+	spmi_bus: qcom,spmi@200f000 {
+		compatible = "qcom,spmi-pmic-arb";
+		reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+		reg = <0x200f000 0xc00>,
+			<0x2400000 0x400000>,
+			<0x2c00000 0x400000>,
+			<0x3800000 0x200000>,
+			<0x200a000 0x2100>;
+
+		interrupt-names = "periph_irq";
+		interrupts = <0 190 0>;
+
+		qcom,ee = <0>;
+
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		interrupt-controller;
+		#interrupt-cells = <4>;
+	};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
index f53280a..39062d5 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk.dtsi
+++ b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
@@ -409,50 +409,50 @@
 
 		frame@0x17CA0000 {
 			frame-number = <0>;
-			interrupts = <0 8 0x4>,
-				     <0 7 0x4>;
+			interrupts = <0 7 0x4>,
+				     <0 6 0x4>;
 			reg = <0x17CA0000 0x1000>,
 			      <0x17CB0000 0x1000>;
 		};
 
 		frame@17cc0000 {
 			frame-number = <1>;
-			interrupts = <0 9 0x4>;
+			interrupts = <0 8 0x4>;
 			reg = <0x17cc0000 0x1000>;
 			status = "disabled";
 		};
 
 		frame@17cd0000 {
 			frame-number = <2>;
-			interrupts = <0 10 0x4>;
+			interrupts = <0 9 0x4>;
 			reg = <0x17cd0000 0x1000>;
 			status = "disabled";
 		};
 
 		frame@17ce0000 {
 			frame-number = <3>;
-			interrupts = <0 11 0x4>;
+			interrupts = <0 10 0x4>;
 			reg = <0x17ce0000 0x1000>;
 			status = "disabled";
 		};
 
 		frame@17cf0000 {
 			frame-number = <4>;
-			interrupts = <0 12 0x4>;
+			interrupts = <0 11 0x4>;
 			reg = <0x17cf0000 0x1000>;
 			status = "disabled";
 		};
 
 		frame@17d00000 {
 			frame-number = <5>;
-			interrupts = <0 36 0x4>;
+			interrupts = <0 12 0x4>;
 			reg = <0x17d00000 0x1000>;
 			status = "disabled";
 		};
 
 		frame@17d10000 {
 			frame-number = <6>;
-			interrupts = <0 37 0x4>;
+			interrupts = <0 13 0x4>;
 			reg = <0x17d10000 0x1000>;
 			status = "disabled";
 		};
diff --git a/arch/arm64/configs/msmskunk-perf_defconfig b/arch/arm64/configs/msmskunk-perf_defconfig
index 8136df2..5f3dda2e 100644
--- a/arch/arm64/configs/msmskunk-perf_defconfig
+++ b/arch/arm64/configs/msmskunk-perf_defconfig
@@ -12,6 +12,8 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
@@ -242,6 +244,7 @@
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVMEM is not set
 # CONFIG_DEVKMEM is not set
+CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
@@ -251,6 +254,7 @@
 CONFIG_SPMI=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_MFD_SPMI_PMIC=y
diff --git a/arch/arm64/configs/msmskunk_defconfig b/arch/arm64/configs/msmskunk_defconfig
index 8b75628..d576155 100644
--- a/arch/arm64/configs/msmskunk_defconfig
+++ b/arch/arm64/configs/msmskunk_defconfig
@@ -13,6 +13,8 @@
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_DEBUG=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
@@ -248,6 +250,7 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_DIAG_CHAR=y
 CONFIG_HVC_DCC=y
 CONFIG_HW_RANDOM=y
 CONFIG_MSM_ADSPRPC=y
@@ -258,6 +261,7 @@
 CONFIG_SPMI=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_MFD_SPMI_PMIC=y
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 163c5f2..5fc1112 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -41,6 +41,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/kryo3xx-arm64-edac.h>
+#include <soc/qcom/scm.h>
 
 static const char *fault_name(unsigned int esr);
 
@@ -445,6 +446,23 @@
 	return 0;
 }
 
+static int do_tlb_conf_fault(unsigned long addr,
+				unsigned int esr,
+				struct pt_regs *regs)
+{
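+	/*
+	 * Hand the faulting address to the secure monitor through an atomic
+	 * SCM call (SCM_SVC_MP, command 0x1B) so that it can resolve the TLB
+	 * conflict; report the abort as unhandled only if that call fails.
+	 */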
+#define SCM_TLB_CONFLICT_CMD	0x1B
+	struct scm_desc desc = {
+		.args[0] = addr,
+		.arginfo = SCM_ARGS(1),
+	};
+
+	if (scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_MP, SCM_TLB_CONFLICT_CMD),
+						&desc))
+		return 1;
+
+	return 0;
+}
+
 /*
  * First Level Translation Fault Handler
  *
@@ -543,7 +561,7 @@
 	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
-	{ do_bad,		SIGBUS,  0,		"TLB conflict abort"		},
+	{ do_tlb_conf_fault,	SIGBUS,  0,		"TLB conflict abort"		},
 	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4c28e1a..08f512b 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -180,10 +180,105 @@
 };
 #endif
 
+#ifdef CONFIG_SCHED_HMP
+
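+/*
+ * Per-cpu HMP scheduler tunables: sched_static_cpu_pwr_cost and
+ * sched_static_cluster_pwr_cost expose the static power cost of a CPU and
+ * of its cluster through the cpu device sysfs nodes, backed by the
+ * sched_{get,set}_static_{cpu,cluster}_pwr_cost() scheduler helpers.
+ */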
+static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	ssize_t rc;
+	int cpuid = cpu->dev.id;
+	unsigned int pwr_cost;
+
+	pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);
+
+	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
+
+	return rc;
+}
+
+static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int err;
+	int cpuid = cpu->dev.id;
+	unsigned int pwr_cost;
+
+	err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
+	if (err)
+		return err;
+
+	err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);
+
+	if (err >= 0)
+		err = count;
+
+	return err;
+}
+
+static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	ssize_t rc;
+	int cpuid = cpu->dev.id;
+	unsigned int pwr_cost;
+
+	pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);
+
+	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
+
+	return rc;
+}
+
+static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int err;
+	int cpuid = cpu->dev.id;
+	unsigned int pwr_cost;
+
+	err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
+	if (err)
+		return err;
+
+	err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);
+
+	if (err >= 0)
+		err = count;
+
+	return err;
+}
+
+static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
+					show_sched_static_cpu_pwr_cost,
+					store_sched_static_cpu_pwr_cost);
+static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
+					show_sched_static_cluster_pwr_cost,
+					store_sched_static_cluster_pwr_cost);
+
+static struct attribute *hmp_sched_cpu_attrs[] = {
+	&dev_attr_sched_static_cpu_pwr_cost.attr,
+	&dev_attr_sched_static_cluster_pwr_cost.attr,
+	NULL
+};
+
+static struct attribute_group sched_hmp_cpu_attr_group = {
+	.attrs = hmp_sched_cpu_attrs,
+};
+
+#endif /* CONFIG_SCHED_HMP */
 static const struct attribute_group *common_cpu_attr_groups[] = {
 #ifdef CONFIG_KEXEC
 	&crash_note_cpu_attr_group,
 #endif
+#ifdef CONFIG_SCHED_HMP
+	&sched_hmp_cpu_attr_group,
+#endif
 	NULL
 };
 
@@ -191,6 +286,9 @@
 #ifdef CONFIG_KEXEC
 	&crash_note_cpu_attr_group,
 #endif
+#ifdef CONFIG_SCHED_HMP
+	&sched_hmp_cpu_attr_group,
+#endif
 	NULL
 };
 
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 2a4435d..4b97c126 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -86,6 +86,9 @@
 
 	struct list_head debugfs_off_cache;
 	struct mutex cache_lock;
+
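+	/* debugfs register dump window: start address and register count */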
+	unsigned int dump_address;
+	unsigned int dump_count;
 #endif
 
 	unsigned int max_register;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 36ce351..b4c5224 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -269,7 +269,7 @@
 				   count, ppos);
 }
 
-#undef REGMAP_ALLOW_WRITE_DEBUGFS
+#define REGMAP_ALLOW_WRITE_DEBUGFS
 #ifdef REGMAP_ALLOW_WRITE_DEBUGFS
 /*
  * This can be dangerous especially when we have clients such as
@@ -320,6 +320,67 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t regmap_data_read_file(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct regmap *map = file->private_data;
+	int new_count;
+
+	regmap_calc_tot_len(map, NULL, 0);
+	new_count = map->dump_count * map->debugfs_tot_len;
+	if (new_count > count)
+		new_count = count;
+
+	if (*ppos == 0)
+		*ppos = map->dump_address * map->debugfs_tot_len;
+	else if (*ppos >= map->dump_address * map->debugfs_tot_len
+			+ map->dump_count * map->debugfs_tot_len)
+		return 0;
+	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
+			new_count, ppos);
+}
+
+#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
+static ssize_t regmap_data_write_file(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	char buf[32];
+	size_t buf_size;
+	char *start = buf;
+	unsigned long value;
+	struct regmap *map = file->private_data;
+	int ret;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	while (*start == ' ')
+		start++;
+	if (kstrtoul(start, 16, &value))
+		return -EINVAL;
+
+	/* Userspace has been fiddling around behind the kernel's back */
+	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+	ret = regmap_write(map, map->dump_address, value);
+	if (ret < 0)
+		return ret;
+	return buf_size;
+}
+#else
+#define regmap_data_write_file NULL
+#endif
+
+static const struct file_operations regmap_data_fops = {
+	.open = simple_open,
+	.read = regmap_data_read_file,
+	.write = regmap_data_write_file,
+	.llseek = default_llseek,
+};
+
 static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
 				      size_t count, loff_t *ppos)
 {
@@ -580,6 +641,15 @@
 
 		debugfs_create_file("registers", registers_mode, map->debugfs,
 				    map, &regmap_map_fops);
+
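+		/*
+		 * "address" selects a register and "count" a window size;
+		 * reading "data" dumps that window while writing "data"
+		 * writes the selected register.
+		 */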
+		debugfs_create_x32("address", 0600, map->debugfs,
+				    &map->dump_address);
+		map->dump_count = 1;
+		debugfs_create_u32("count", 0600, map->debugfs,
+				    &map->dump_count);
+		debugfs_create_file("data", registers_mode, map->debugfs,
+				    map, &regmap_data_fops);
+
 		debugfs_create_file("access", 0400, map->debugfs,
 				    map, &regmap_access_fops);
 	}
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 26b6bc8..62c6579 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,4 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
-ccflags-y += -Idrivers/gpu/drm/msm/display-manager
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
 ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
@@ -43,26 +42,11 @@
 	sde/sde_kms.o \
 	sde/sde_plane.o \
 	sde/sde_connector.o \
+	sde/sde_backlight.o \
 	sde/sde_color_processing.o \
 	sde/sde_vbif.o \
-	msm_atomic.o \
-	msm_debugfs.o \
-	msm_drv.o \
-	msm_fb.o \
-	msm_fence.o \
-	msm_gem.o \
-	msm_gem_prime.o \
-	msm_gem_shrinker.o \
-	msm_gem_submit.o \
-	msm_gpu.o \
-	msm_iommu.o \
-	msm_smmu.o \
-	msm_perf.o \
-	msm_rd.o \
-	msm_ringbuffer.o \
-	msm_evtlog.o \
-	sde_power_handle.o \
-	msm_prop.o \
+	sde_dbg_evtlog.o \
+	sde_io_util.o \
 
 # use drm gpu driver only if qcom_kgsl driver not available
 ifneq ($(CONFIG_QCOM_KGSL),y)
@@ -118,9 +102,11 @@
 				dsi-staging/dsi_panel.o \
 				dsi-staging/dsi_display_test.o
 
-obj-$(CONFIG_DRM_MSM)	+= msm_drm.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
+				dsi/pll/dsi_pll_28nm.o
 
-obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \
+msm_drm-$(CONFIG_DRM_MSM) += \
+	sde/sde_hw_catalog.o \
 	sde/sde_hw_cdm.o \
 	sde/sde_hw_dspp.o \
 	sde/sde_hw_intf.o \
@@ -134,8 +120,25 @@
 	sde/sde_hw_interrupts.o \
 	sde/sde_hw_vbif.o \
 	sde/sde_formats.o \
+	sde_power_handle.o \
+	sde/sde_hw_color_processing_v1_7.o
 
-obj-$(CONFIG_DRM_MSM) += display-manager/display_manager.o
-
-obj-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
+msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
 	sde/sde_encoder_phys_wb.o
+
+msm_drm-$(CONFIG_DRM_MSM) += \
+	msm_atomic.o \
+	msm_drv.o \
+	msm_fb.o \
+	msm_gem.o \
+	msm_gem_prime.o \
+	msm_gem_submit.o \
+	msm_gpu.o \
+	msm_iommu.o \
+	msm_smmu.o \
+	msm_perf.o \
+	msm_rd.o \
+	msm_ringbuffer.o \
+	msm_prop.o
+
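+# All msm_drm-y objects above are linked into a single msm_drm module.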
+obj-$(CONFIG_DRM_MSM)	+= msm_drm.o
diff --git a/drivers/gpu/drm/msm/display-manager/display_manager.c b/drivers/gpu/drm/msm/display-manager/display_manager.c
deleted file mode 100644
index 0b5d2f9..0000000
--- a/drivers/gpu/drm/msm/display-manager/display_manager.c
+++ /dev/null
@@ -1,611 +0,0 @@
-/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt)	"dm-drm:[%s] " fmt, __func__
-#include <linux/of_device.h>
-#include <linux/err.h>
-#include <linux/regulator/consumer.h>
-#include <linux/clk.h>
-#include <linux/msm-bus.h>
-#include <linux/of_irq.h>
-
-#include "msm_drv.h"
-#include "msm_kms.h"
-#include "msm_gpu.h"
-#include "sde_connector.h"
-
-#include "dsi_display.h"
-#include "dsi_drm.h"
-#include "sde_wb.h"
-#include "display_manager.h"
-
-/**
- * _dm_cache_active_displays - determine display type based on index
- * @disp_m: Pointer to display manager structure
- * Returns: Number of active displays in the system
- */
-static u32 _dm_cache_active_displays(struct display_manager *disp_m)
-{
-	u32 count;
-
-	if (!disp_m)
-		return 0;
-
-	disp_m->display_count = 0;
-
-	/* query dsi displays */
-	disp_m->dsi_display_count = dsi_display_get_num_of_displays();
-
-	/* query hdmi displays */
-	disp_m->hdmi_display_count = 0;
-
-	/* query dp displays */
-	disp_m->dp_display_count = 0;
-
-	/* query wb displays */
-	disp_m->wb_display_count = sde_wb_get_num_of_displays();
-	DBG("wb display count=%d", disp_m->wb_display_count);
-
-	count = disp_m->dsi_display_count
-		+ disp_m->hdmi_display_count
-		+ disp_m->dp_display_count
-		+ disp_m->wb_display_count;
-
-	disp_m->displays = kcalloc(count, sizeof(void *), GFP_KERNEL);
-	if (!disp_m->displays) {
-		disp_m->dsi_displays = 0;
-		disp_m->dsi_display_count = 0;
-
-		disp_m->hdmi_displays = 0;
-		disp_m->hdmi_display_count = 0;
-
-		disp_m->dp_displays = 0;
-		disp_m->dp_display_count = 0;
-
-		disp_m->wb_displays = 0;
-		disp_m->wb_display_count = 0;
-	} else {
-		/* get final dsi display list */
-		disp_m->dsi_displays = disp_m->displays;
-		disp_m->dsi_display_count =
-			dsi_display_get_active_displays(disp_m->dsi_displays,
-					disp_m->dsi_display_count);
-
-		/* get final hdmi display list */
-		disp_m->hdmi_displays = disp_m->dsi_displays
-			+ disp_m->dsi_display_count;
-		disp_m->hdmi_display_count = 0;
-
-		/* get final dp display list */
-		disp_m->dp_displays = disp_m->hdmi_displays
-			+ disp_m->hdmi_display_count;
-		disp_m->dp_display_count = 0;
-
-		/* get final wb display list */
-		disp_m->wb_displays = disp_m->dp_displays
-			+ disp_m->dp_display_count;
-		disp_m->wb_display_count =
-			wb_display_get_displays(disp_m->wb_displays,
-					disp_m->wb_display_count);
-	}
-
-	/* set final display count */
-	disp_m->display_count = disp_m->dsi_display_count
-		+ disp_m->hdmi_display_count
-		+ disp_m->dp_display_count
-		+ disp_m->wb_display_count;
-
-	return disp_m->display_count;
-}
-
-/**
- * _dm_get_type_by_index - determine display type based on index
- * @disp_m: Pointer to display manager structure
- * @display_index: Incoming display index
- * Returns: DRM_MODE_CONNECTOR_ definition corresponding to display_index
- */
-static int _dm_get_type_by_index(struct display_manager *disp_m,
-				      u32 display_index)
-{
-	if (disp_m) {
-		if (display_index < disp_m->dsi_display_count)
-			return DRM_MODE_CONNECTOR_DSI;
-		display_index -= disp_m->dsi_display_count;
-
-		if (display_index < disp_m->hdmi_display_count)
-			return DRM_MODE_CONNECTOR_HDMIA;
-		display_index -= disp_m->hdmi_display_count;
-
-		if (display_index < disp_m->dp_display_count)
-			return DRM_MODE_CONNECTOR_DisplayPort;
-		display_index -= disp_m->dp_display_count;
-
-		if (display_index < disp_m->wb_display_count)
-			return DRM_MODE_CONNECTOR_VIRTUAL;
-		display_index -= disp_m->wb_display_count;
-	}
-	return DRM_MODE_CONNECTOR_Unknown;
-}
-
-/**
- * _dm_init_active_displays - initialize active display drivers
- * @disp_m: Pointer to display manager structure
- * Returns: Zero on success
- */
-static int _dm_init_active_displays(struct display_manager *disp_m)
-{
-	void *display;
-	int rc = 0;
-	int dsi_idx, wb_idx;
-
-	for (dsi_idx = 0; dsi_idx < disp_m->dsi_display_count; dsi_idx++) {
-		display = disp_m->dsi_displays[dsi_idx];
-
-		rc = dsi_display_dev_init(display);
-		if (rc) {
-			pr_err("failed to init dsi display, rc=%d\n", rc);
-			goto error_deinit_dsi_displays;
-		}
-	}
-
-	for (wb_idx = 0; wb_idx < disp_m->wb_display_count; wb_idx++) {
-		display = disp_m->wb_displays[wb_idx];
-
-		rc = sde_wb_dev_init(display);
-		if (rc) {
-			pr_err("failed to init wb display, rc=%d\n", rc);
-			goto error_deinit_sde_wb;
-		}
-	}
-
-	/* TODO: INIT HDMI and DP displays here */
-	return rc;
-
-error_deinit_sde_wb:
-	for (wb_idx = wb_idx - 1; wb_idx >= 0; wb_idx--) {
-		display = disp_m->wb_displays[wb_idx];
-		(void)sde_wb_dev_deinit(display);
-	}
-
-error_deinit_dsi_displays:
-	for (dsi_idx = dsi_idx - 1; dsi_idx >= 0; dsi_idx--) {
-		display = disp_m->dsi_displays[dsi_idx];
-		(void)dsi_display_dev_deinit(display);
-	}
-
-	return rc;
-}
-
-/**
- * _dm_deinit_active_displays - deconstruct active display drivers
- * @disp_m: Pointer to display manager structure
- * Returns: Zero on success
- */
-static void _dm_deinit_active_displays(struct display_manager *disp_m)
-{
-	void *display;
-	int rc, i;
-
-	for (i = 0; i < disp_m->wb_display_count; i++) {
-		display = disp_m->wb_displays[i];
-
-		rc = sde_wb_dev_deinit(display);
-		if (rc)
-			pr_err("failed to deinit wb display, rc=%d\n", rc);
-	}
-
-	for (i = 0; i < disp_m->dsi_display_count; i++) {
-		display = disp_m->dsi_displays[i];
-		rc = dsi_display_dev_deinit(display);
-		if (rc)
-			pr_err("failed to deinit dsi display, rc=%d\n", rc);
-	}
-
-	/* TODO: DEINIT HDMI and DP displays here */
-}
-
-static int disp_manager_comp_ops_bind(struct device *dev,
-				     struct device *master,
-				     void *data)
-{
-	struct drm_device *drm;
-	struct msm_drm_private *priv;
-	struct display_manager *disp_m;
-	void *display;
-	int dsi_idx, wb_idx;
-	int rc = -EINVAL;
-
-	if (master && dev) {
-		drm = dev_get_drvdata(master);
-		disp_m = platform_get_drvdata(to_platform_device(dev));
-		if (drm && drm->dev_private && disp_m)
-			rc = 0;
-	}
-
-	if (rc) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	priv = drm->dev_private;
-	disp_m->drm_dev = drm;
-
-	/* DSI displays */
-	for (dsi_idx = 0; dsi_idx < disp_m->dsi_display_count; dsi_idx++) {
-		display = disp_m->dsi_displays[dsi_idx];
-
-		rc = dsi_display_bind(display, drm);
-		if (rc) {
-			if (rc != -EPROBE_DEFER)
-				pr_err("Failed to bind dsi display_%d, rc=%d\n",
-					dsi_idx, rc);
-			goto error_unbind_dsi;
-		}
-	}
-
-	/* WB displays */
-	for (wb_idx = 0; wb_idx < disp_m->wb_display_count; wb_idx++) {
-		display = disp_m->wb_displays[wb_idx];
-
-		rc = sde_wb_bind(display, drm);
-		if (rc) {
-			pr_err("Failed to bind wb display_%d, rc=%d\n",
-				wb_idx, rc);
-			goto error_unbind_wb;
-		}
-	}
-
-	/* TODO: BIND HDMI display here */
-	/* TODO: BIND DP display here */
-	priv->dm = disp_m;
-	return rc;
-
-error_unbind_wb:
-	for (wb_idx = wb_idx - 1; wb_idx >= 0; wb_idx--) {
-		display = disp_m->wb_displays[wb_idx];
-		(void)sde_wb_unbind(display);
-	}
-
-error_unbind_dsi:
-	for (dsi_idx = dsi_idx - 1; dsi_idx >= 0; dsi_idx--) {
-		display = disp_m->dsi_displays[dsi_idx];
-		(void)dsi_display_unbind(display);
-	}
-	return rc;
-}
-
-static void disp_manager_comp_ops_unbind(struct device *dev,
-					struct device *master,
-					void *data)
-{
-	int rc = 0;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct display_manager *disp_m;
-	void *display;
-	int i;
-
-	if (!dev) {
-		pr_err("Invalid params\n");
-		return;
-	}
-
-	disp_m = platform_get_drvdata(pdev);
-
-	/* WB displays */
-	for (i = 0; i < disp_m->wb_display_count; i++) {
-		display = disp_m->wb_displays[i];
-
-		rc = sde_wb_unbind(display);
-		if (rc)
-			pr_err("failed to unbind wb display_%d, rc=%d\n",
-			       i, rc);
-	}
-
-	/* DSI displays */
-	for (i = 0; i < disp_m->dsi_display_count; i++) {
-		display = disp_m->dsi_displays[i];
-
-		rc = dsi_display_unbind(display);
-		if (rc)
-			pr_err("failed to unbind dsi display_%d, rc=%d\n",
-			       i, rc);
-	}
-
-	/* TODO: UNBIND HDMI display here */
-	/* TODO: UNBIND DP display here */
-}
-
-static const struct of_device_id displays_dt_match[] = {
-	{.compatible = "qcom,dsi-display"},
-	{.compatible = "qcom,hdmi-display"},
-	{.compatible = "qcom,dp-display"},
-	{.compatible = "qcom,wb-display"},
-	{}
-};
-
-static const struct component_ops disp_manager_comp_ops = {
-	.bind = disp_manager_comp_ops_bind,
-	.unbind = disp_manager_comp_ops_unbind,
-};
-
-static int disp_manager_dev_probe(struct platform_device *pdev)
-{
-	struct display_manager *disp_m;
-	int rc = 0;
-
-	if (!pdev || !pdev->dev.of_node) {
-		pr_err("pdev not found\n");
-		return -ENODEV;
-	}
-
-	disp_m = devm_kzalloc(&pdev->dev, sizeof(*disp_m), GFP_KERNEL);
-	if (!disp_m)
-		return -ENOMEM;
-
-	disp_m->name = "qcom,display-manager";
-
-	of_platform_populate(pdev->dev.of_node, displays_dt_match,
-			     NULL, &pdev->dev);
-
-	disp_m->display_count = _dm_cache_active_displays(disp_m);
-	if (!disp_m->display_count) {
-		rc = -ENODEV;
-		pr_err("no displays found, rc=%d\n", rc);
-		goto error_free_disp_m;
-	}
-
-	rc = _dm_init_active_displays(disp_m);
-	if (rc) {
-		pr_err("failed to initialize displays, rc=%d\n", rc);
-		goto error_remove_displays;
-	}
-
-	rc = component_add(&pdev->dev, &disp_manager_comp_ops);
-	if (rc) {
-		pr_err("failed to add component, rc=%d\n", rc);
-		goto error_deinit_displays;
-	}
-
-	mutex_init(&disp_m->lock);
-	platform_set_drvdata(pdev, disp_m);
-
-	return rc;
-error_deinit_displays:
-	_dm_deinit_active_displays(disp_m);
-error_remove_displays:
-	of_platform_depopulate(&pdev->dev);
-error_free_disp_m:
-	devm_kfree(&pdev->dev, disp_m);
-	return rc;
-}
-
-static int disp_manager_dev_remove(struct platform_device *pdev)
-{
-	struct display_manager *disp_m;
-
-	if (!pdev) {
-		pr_err("invalid pdev argument\n");
-		return -ENODEV;
-	}
-
-	disp_m = platform_get_drvdata(pdev);
-
-	_dm_deinit_active_displays(disp_m);
-	of_platform_depopulate(&pdev->dev);
-	devm_kfree(&pdev->dev, disp_m);
-
-	return 0;
-}
-
-static const struct of_device_id disp_manager_dt_match[] = {
-	{.compatible = "qcom,display-manager"},
-	{}
-};
-
-static struct platform_driver disp_manager_driver = {
-	.probe = disp_manager_dev_probe,
-	.remove = disp_manager_dev_remove,
-	.driver = {
-		.name = "msm-display-manager",
-		.of_match_table = disp_manager_dt_match,
-	},
-};
-
-int display_manager_get_count(struct display_manager *disp_m)
-{
-	int count;
-
-	if (!disp_m) {
-		pr_err("invalid params\n");
-		return 0;
-	}
-
-	mutex_lock(&disp_m->lock);
-
-	count = disp_m->display_count;
-
-	mutex_unlock(&disp_m->lock);
-	return count;
-}
-
-int display_manager_get_info_by_index(struct display_manager *disp_m,
-				      u32 display_index,
-				      struct msm_display_info *info)
-{
-	void *display;
-	int rc = 0;
-
-	if (!disp_m || !info || (display_index >= disp_m->display_count)) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	memset(info, 0, sizeof(*info));
-
-	mutex_lock(&disp_m->lock);
-
-	display = disp_m->displays[display_index];
-
-	switch (_dm_get_type_by_index(disp_m, display_index)) {
-	case DRM_MODE_CONNECTOR_DSI:
-		rc = dsi_display_get_info(info, display);
-		if (rc) {
-			pr_err("failed to get dsi info, rc=%d\n", rc);
-			rc = -EINVAL;
-		}
-		break;
-	case DRM_MODE_CONNECTOR_VIRTUAL:
-		rc = sde_wb_get_info(info, display);
-		if (rc) {
-			pr_err("failed to get wb info, rc=%d\n", rc);
-			rc = -EINVAL;
-		}
-		break;
-	default:
-		pr_err("invalid index %d\n", display_index);
-		rc = -EINVAL;
-		break;
-	}
-	mutex_unlock(&disp_m->lock);
-	return rc;
-}
-
-int display_manager_drm_init_by_index(struct display_manager *disp_m,
-				      u32 display_index,
-				      struct drm_encoder *encoder)
-{
-	static const struct sde_connector_ops dsi_ops = {
-		.post_init =  dsi_conn_post_init,
-		.detect =     dsi_conn_detect,
-		.get_modes =  dsi_connector_get_modes,
-		.mode_valid = dsi_conn_mode_valid,
-		.get_info =   dsi_display_get_info,
-	};
-	static const struct sde_connector_ops wb_ops = {
-		.post_init =    sde_wb_connector_post_init,
-		.detect =       sde_wb_connector_detect,
-		.get_modes =    sde_wb_connector_get_modes,
-		.set_property = sde_wb_connector_set_property,
-		.get_info =     sde_wb_get_info,
-	};
-	void *display;
-	int rc = -EINVAL;
-	struct drm_connector *connector;
-
-	if (!disp_m || !encoder || (display_index >= disp_m->display_count)) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&disp_m->lock);
-
-	display = disp_m->displays[display_index];
-
-	switch (_dm_get_type_by_index(disp_m, display_index)) {
-	case DRM_MODE_CONNECTOR_DSI:
-		rc = dsi_display_drm_bridge_init(display, encoder);
-		if (rc) {
-			pr_err("dsi bridge init failed\n");
-			break;
-		}
-
-		connector = sde_connector_init(disp_m->drm_dev,
-				encoder,
-				0,
-				display,
-				&dsi_ops,
-				DRM_CONNECTOR_POLL_HPD,
-				DRM_MODE_CONNECTOR_DSI);
-		if (!connector)
-			rc = -ENOMEM;
-		else if (IS_ERR(connector))
-			rc = PTR_ERR(connector);
-		break;
-	case DRM_MODE_CONNECTOR_VIRTUAL:
-		rc = sde_wb_drm_init(display, encoder);
-		if (rc) {
-			pr_err("writeback init failed\n");
-			break;
-		}
-
-		connector = sde_connector_init(disp_m->drm_dev,
-				encoder,
-				0,
-				display,
-				&wb_ops,
-				DRM_CONNECTOR_POLL_HPD,
-				DRM_MODE_CONNECTOR_VIRTUAL);
-		if (!connector)
-			rc = -ENOMEM;
-		else if (IS_ERR(connector))
-			rc = PTR_ERR(connector);
-		break;
-	default:
-		pr_err("invalid index %d\n", display_index);
-		break;
-	}
-
-	mutex_unlock(&disp_m->lock);
-
-	return rc;
-}
-
-int display_manager_drm_deinit_by_index(struct display_manager *disp_m,
-					u32 display_index)
-{
-	void *display;
-	int rc = 0;
-
-	if (!disp_m) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&disp_m->lock);
-
-	if (display_index < disp_m->display_count)
-		display = disp_m->displays[display_index];
-
-	switch (_dm_get_type_by_index(disp_m, display_index)) {
-	case DRM_MODE_CONNECTOR_DSI:
-		dsi_display_drm_bridge_deinit(display);
-		break;
-	default:
-		pr_err("invalid index\n");
-		rc = -EINVAL;
-		break;
-	}
-
-	mutex_unlock(&disp_m->lock);
-
-	return rc;
-}
-
-
-void display_manager_register(void)
-{
-	dsi_phy_drv_register();
-	dsi_ctrl_drv_register();
-	dsi_display_register();
-	sde_wb_register();
-	platform_driver_register(&disp_manager_driver);
-}
-void display_manager_unregister(void)
-{
-	platform_driver_unregister(&disp_manager_driver);
-	sde_wb_unregister();
-	dsi_display_unregister();
-	dsi_ctrl_drv_unregister();
-	dsi_phy_drv_unregister();
-}
diff --git a/drivers/gpu/drm/msm/display-manager/display_manager.h b/drivers/gpu/drm/msm/display-manager/display_manager.h
deleted file mode 100644
index d220fc0..0000000
--- a/drivers/gpu/drm/msm/display-manager/display_manager.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _DISPLAY_MANAGER_H_
-#define _DISPLAY_MANAGER_H_
-
-struct display_manager {
-	struct drm_device *drm_dev;
-	struct platform_device *pdev;
-	const char *name;
-
-	struct mutex lock;
-
-	u32 display_count;
-	void **displays;
-
-	u32 dsi_display_count;
-	void **dsi_displays;
-
-	u32 hdmi_display_count;
-	void **hdmi_displays;
-
-	u32 dp_display_count;
-	void **dp_displays;
-
-	u32 wb_display_count;
-	void **wb_displays;
-
-	/* Debug fs */
-	struct dentry *debugfs_root;
-};
-
-/**
- * display_manager_get_count() - returns the number of display present
- * @disp_m:      Handle to Display manager.
- *
- * Returns the sum total of DSI, HDMI and DP display present on the board.
- *
- * Return: error code (< 0) in case of error or number of display ( >= 0)
- */
-int display_manager_get_count(struct display_manager *disp_m);
-
-/**
- * display_manager_get_info_by_index() - returns display information
- * @disp_m:        Handle to Display manager.
- * @display_index: display index (valid indices are 0 to (display_count - 1).
- * @info:          Structure where display info is copied.
- *
- * Return: error code.
- */
-int display_manager_get_info_by_index(struct display_manager *disp_m,
-				      u32 display_index,
-				      struct msm_display_info *info);
-
-/**
- * display_manager_drm_init_by_index() - initialize drm objects for display
- * @disp_m:         Handle to Display manager.
- * @display_index:  display index (valid indices are 0 to (display_count - 1).
- * @encoder:        Pointer to encoder object to which display is attached.
- *
- * Return: error code.
- */
-int display_manager_drm_init_by_index(struct display_manager *disp_m,
-				      u32 display_index,
-				      struct drm_encoder *encoder);
-
-/**
- * display_manager_drm_deinit_by_index() - detroys drm objects
- * @disp_m:         Handle to Display manager.
- * @display_index:  display index (valid indices are 0 to (display_count - 1).
- *
- * Return: error code.
- */
-int display_manager_drm_deinit_by_index(struct display_manager *disp_m,
-					u32 display_index);
-
-/**
- * display_manager_register() - register display interface drivers
- */
-void display_manager_register(void);
-
-/**
- * display_manager_unregister() - unregisters display interface drivers
- */
-void display_manager_unregister(void);
-
-#endif /* _DISPLAY_MANAGER_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 59e9899..5a166a4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -36,6 +36,24 @@
 
 static struct dsi_display *main_display;
 
+int dsi_display_set_backlight(void *display, u32 bl_lvl)
+{
+	struct dsi_display *dsi_display = display;
+	struct dsi_panel *panel;
+	int rc = 0;
+
+	if (dsi_display == NULL)
+		return -EINVAL;
+
+	panel = dsi_display->panel;
+
+	rc = dsi_panel_set_backlight(panel, bl_lvl);
+	if (rc)
+		pr_err("unable to set backlight\n");
+
+	return rc;
+}
+
 static ssize_t debugfs_dump_info_read(struct file *file,
 				      char __user *buff,
 				      size_t count,
@@ -126,7 +144,7 @@
 	return rc;
 }
 
-static int dsi_dipslay_debugfs_deinit(struct dsi_display *display)
+static int dsi_display_debugfs_deinit(struct dsi_display *display)
 {
 	debugfs_remove_recursive(display->root);
 
@@ -1592,6 +1610,228 @@
 	return rc;
 }
 
+/**
+ * _dsi_display_dev_init - initializes the display device
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ * @display:         Handle to the display
+ * Returns:          Zero on success
+ */
+static int _dsi_display_dev_init(struct dsi_display *display)
+{
+	int rc = 0;
+
+	if (!display) {
+		pr_err("invalid display\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_parse_dt(display);
+	if (rc) {
+		pr_err("[%s] failed to parse dt, rc=%d\n", display->name, rc);
+		goto error;
+	}
+
+	rc = dsi_display_res_init(display);
+	if (rc) {
+		pr_err("[%s] failed to initialize resources, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/**
+ * _dsi_display_dev_deinit - deinitializes the display device
+ * All the resources acquired during device init will be released.
+ * @display:        Handle to the display
+ * Returns:         Zero on success
+ */
+static int _dsi_display_dev_deinit(struct dsi_display *display)
+{
+	int rc = 0;
+
+	if (!display) {
+		pr_err("invalid display\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_res_deinit(display);
+	if (rc)
+		pr_err("[%s] failed to deinitialize resource, rc=%d\n",
+		       display->name, rc);
+
+	mutex_unlock(&display->display_lock);
+
+	return rc;
+}
+
+/**
+ * dsi_display_bind - bind dsi device with controlling device
+ * @dev:        Pointer to base of platform device
+ * @master:     Pointer to container of drm device
+ * @data:       Pointer to private data
+ * Returns:     Zero on success
+ */
+static int dsi_display_bind(struct device *dev,
+		struct device *master,
+		void *data)
+{
+	struct dsi_display_ctrl *display_ctrl;
+	struct drm_device *drm;
+	struct dsi_display *display;
+	struct platform_device *pdev = to_platform_device(dev);
+	int i, rc = 0;
+
+	if (!dev || !pdev || !master) {
+		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+				dev, pdev, master);
+		return -EINVAL;
+	}
+
+	drm = dev_get_drvdata(master);
+	display = platform_get_drvdata(pdev);
+	if (!drm || !display) {
+		pr_err("invalid param(s), drm %pK, display %pK\n",
+				drm, display);
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_debugfs_init(display);
+	if (rc) {
+		pr_err("[%s] debugfs init failed, rc=%d\n", display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		display_ctrl = &display->ctrl[i];
+
+		rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
+		if (rc) {
+			pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
+			       display->name, i, rc);
+			goto error_ctrl_deinit;
+		}
+
+		rc = dsi_phy_drv_init(display_ctrl->phy);
+		if (rc) {
+			pr_err("[%s] Failed to initialize phy[%d], rc=%d\n",
+				display->name, i, rc);
+			(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+			goto error_ctrl_deinit;
+		}
+	}
+
+	rc = dsi_display_mipi_host_init(display);
+	if (rc) {
+		pr_err("[%s] failed to initialize mipi host, rc=%d\n",
+		       display->name, rc);
+		goto error_ctrl_deinit;
+	}
+
+	rc = dsi_panel_drv_init(display->panel, &display->host);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("[%s] failed to initialize panel driver, rc=%d\n",
+			       display->name, rc);
+		goto error_host_deinit;
+	}
+
+	rc = dsi_panel_get_mode_count(display->panel, &display->num_of_modes);
+	if (rc) {
+		pr_err("[%s] failed to get mode count, rc=%d\n",
+		       display->name, rc);
+		goto error_panel_deinit;
+	}
+
+	display->drm_dev = drm;
+	goto error;
+
+error_panel_deinit:
+	(void)dsi_panel_drv_deinit(display->panel);
+error_host_deinit:
+	(void)dsi_display_mipi_host_deinit(display);
+error_ctrl_deinit:
+	for (i = i - 1; i >= 0; i--) {
+		display_ctrl = &display->ctrl[i];
+		(void)dsi_phy_drv_deinit(display_ctrl->phy);
+		(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+	}
+	(void)dsi_display_debugfs_deinit(display);
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/**
+ * dsi_display_unbind - unbind dsi from controlling device
+ * @dev:        Pointer to base of platform device
+ * @master:     Pointer to container of drm device
+ * @data:       Pointer to private data
+ */
+static void dsi_display_unbind(struct device *dev,
+		struct device *master, void *data)
+{
+	struct dsi_display_ctrl *display_ctrl;
+	struct dsi_display *display;
+	struct platform_device *pdev = to_platform_device(dev);
+	int i, rc = 0;
+
+	if (!dev || !pdev) {
+		pr_err("invalid param(s)\n");
+		return;
+	}
+
+	display = platform_get_drvdata(pdev);
+	if (!display) {
+		pr_err("invalid display\n");
+		return;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_panel_drv_deinit(display->panel);
+	if (rc)
+		pr_err("[%s] failed to deinit panel driver, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_mipi_host_deinit(display);
+	if (rc)
+		pr_err("[%s] failed to deinit mipi hosts, rc=%d\n",
+		       display->name,
+		       rc);
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		display_ctrl = &display->ctrl[i];
+
+		rc = dsi_phy_drv_deinit(display_ctrl->phy);
+		if (rc)
+			pr_err("[%s] failed to deinit phy%d driver, rc=%d\n",
+			       display->name, i, rc);
+
+		rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+		if (rc)
+			pr_err("[%s] failed to deinit ctrl%d driver, rc=%d\n",
+			       display->name, i, rc);
+	}
+	(void)dsi_display_debugfs_deinit(display);
+
+	mutex_unlock(&display->display_lock);
+}
+
+static const struct component_ops dsi_display_comp_ops = {
+	.bind = dsi_display_bind,
+	.unbind = dsi_display_unbind,
+};
+
 static struct platform_driver dsi_display_driver = {
 	.probe = dsi_display_dev_probe,
 	.remove = dsi_display_dev_remove,
@@ -1632,8 +1872,19 @@
 	mutex_lock(&dsi_display_list_lock);
 	list_add(&display->list, &dsi_display_list);
 	mutex_unlock(&dsi_display_list_lock);
-	if (display->is_active)
+
+	if (display->is_active) {
 		main_display = display;
+		rc = _dsi_display_dev_init(display);
+		if (rc) {
+			pr_err("device init failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = component_add(&pdev->dev, &dsi_display_comp_ops);
+		if (rc)
+			pr_err("component add failed, rc=%d\n", rc);
+	}
 	return rc;
 }
 
@@ -1650,6 +1901,8 @@
 
 	display = platform_get_drvdata(pdev);
 
+	(void)_dsi_display_dev_deinit(display);
+
 	mutex_lock(&dsi_display_list_lock);
 	list_for_each_entry_safe(pos, tmp, &dsi_display_list, list) {
 		if (pos == display) {
@@ -1664,9 +1917,9 @@
 	return rc;
 }
 
-u32 dsi_display_get_num_of_displays(void)
+int dsi_display_get_num_of_displays(void)
 {
-	u32 count = 0;
+	int count = 0;
 	struct dsi_display *display;
 
 	mutex_lock(&dsi_display_list_lock);
@@ -1728,178 +1981,6 @@
 	mutex_unlock(&display->display_lock);
 }
 
-int dsi_display_dev_init(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_display_parse_dt(display);
-	if (rc) {
-		pr_err("[%s] failed to parse dt, rc=%d\n", display->name, rc);
-		goto error;
-	}
-
-	rc = dsi_display_res_init(display);
-	if (rc) {
-		pr_err("[%s] failed to initialize resources, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_dev_deinit(struct dsi_display *display)
-{
-	int rc = 0;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_display_res_deinit(display);
-	if (rc)
-		pr_err("[%s] failed to deinitialize resource, rc=%d\n",
-		       display->name, rc);
-
-	mutex_unlock(&display->display_lock);
-
-	return rc;
-}
-
-int dsi_display_bind(struct dsi_display *display, struct drm_device *dev)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *display_ctrl;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_display_debugfs_init(display);
-	if (rc) {
-		pr_err("[%s]Debugfs init failed, rc=%d\n", display->name, rc);
-		goto error;
-	}
-
-	for (i = 0; i < display->ctrl_count; i++) {
-		display_ctrl = &display->ctrl[i];
-
-		rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
-		if (rc) {
-			pr_err("[%s] Failed to initialize ctrl[%d], rc=%d\n",
-			       display->name, i, rc);
-			goto error_ctrl_deinit;
-		}
-
-		rc = dsi_phy_drv_init(display_ctrl->phy);
-		if (rc) {
-			pr_err("[%s] Failed to initialize phy[%d], rc=%d\n",
-				display->name, i, rc);
-			(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
-			goto error_ctrl_deinit;
-		}
-	}
-
-	rc = dsi_display_mipi_host_init(display);
-	if (rc) {
-		pr_err("[%s] Failed to initialize mipi host, rc=%d\n",
-		       display->name, rc);
-		goto error_ctrl_deinit;
-	}
-
-	rc = dsi_panel_drv_init(display->panel, &display->host);
-	if (rc) {
-		if (rc != -EPROBE_DEFER)
-			pr_err("[%s] Failed to initialize panel driver, rc=%d\n",
-			       display->name, rc);
-		goto error_host_deinit;
-	}
-
-	rc = dsi_panel_get_mode_count(display->panel, &display->num_of_modes);
-	if (rc) {
-		pr_err("[%s] Failed to get mode count, rc=%d\n",
-		       display->name, rc);
-		goto error_panel_deinit;
-	}
-
-	display->drm_dev = dev;
-	goto error;
-
-error_panel_deinit:
-	(void)dsi_panel_drv_deinit(display->panel);
-error_host_deinit:
-	(void)dsi_display_mipi_host_deinit(display);
-error_ctrl_deinit:
-	for (i = i - 1; i >= 0; i--) {
-		display_ctrl = &display->ctrl[i];
-		(void)dsi_phy_drv_deinit(display_ctrl->phy);
-		(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
-	}
-	(void)dsi_dipslay_debugfs_deinit(display);
-error:
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
-int dsi_display_unbind(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *display_ctrl;
-
-	if (!display) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	rc = dsi_panel_drv_deinit(display->panel);
-	if (rc)
-		pr_err("[%s] failed to deinit panel driver, rc=%d\n",
-		       display->name, rc);
-
-	rc = dsi_display_mipi_host_deinit(display);
-	if (rc)
-		pr_err("[%s] failed to deinit mipi hosts, rc=%d\n",
-		       display->name,
-		       rc);
-
-	for (i = 0; i < display->ctrl_count; i++) {
-		display_ctrl = &display->ctrl[i];
-
-		rc = dsi_phy_drv_deinit(display_ctrl->phy);
-		if (rc)
-			pr_err("[%s] failed to deinit phy%d driver, rc=%d\n",
-			       display->name, i, rc);
-
-		rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
-		if (rc)
-			pr_err("[%s] failed to deinit ctrl%d driver, rc=%d\n",
-			       display->name, i, rc);
-	}
-
-	(void)dsi_dipslay_debugfs_deinit(display);
-
-	mutex_unlock(&display->display_lock);
-	return rc;
-}
-
 int dsi_display_drm_bridge_init(struct dsi_display *display,
 		struct drm_encoder *enc)
 {
@@ -1907,8 +1988,8 @@
 	struct dsi_bridge *bridge;
 	struct msm_drm_private *priv = NULL;
 
-	if (!display || !enc) {
-		pr_err("Invalid params\n");
+	if (!display || !display->drm_dev || !enc) {
+		pr_err("invalid param(s)\n");
 		return -EINVAL;
 	}
 
@@ -2489,12 +2570,19 @@
 	return rc;
 }
 
-void dsi_display_register(void)
+static int __init dsi_display_register(void)
 {
-	platform_driver_register(&dsi_display_driver);
+	dsi_phy_drv_register();
+	dsi_ctrl_drv_register();
+	return platform_driver_register(&dsi_display_driver);
 }
 
-void dsi_display_unregister(void)
+static void __exit dsi_display_unregister(void)
 {
 	platform_driver_unregister(&dsi_display_driver);
+	dsi_ctrl_drv_unregister();
+	dsi_phy_drv_unregister();
 }
+
+module_init(dsi_display_register);
+module_exit(dsi_display_unregister);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index f941cd4..b77bf26 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -159,22 +159,12 @@
 int dsi_display_dev_remove(struct platform_device *pdev);
 
 /**
- * dsi_display_register() - register dsi display platform driver
- */
-void dsi_display_register(void);
-
-/**
- * dsi_display_unregister() - unregister dsi display platform driver
- */
-void dsi_display_unregister(void);
-
-/**
  * dsi_display_get_num_of_displays() - returns number of display devices
  *				       supported.
  *
  * Return: number of displays.
  */
-u32 dsi_display_get_num_of_displays(void);
+int dsi_display_get_num_of_displays(void);
 
 /**
  * dsi_display_get_active_displays - returns pointers for active display devices
@@ -201,44 +191,6 @@
 void dsi_display_set_active_state(struct dsi_display *display, bool is_active);
 
 /**
- * dsi_display_dev_init() - Initializes the display device
- * @display:         Handle to the display.
- *
- * Initialization will acquire references to the resources required for the
- * display hardware to function.
- *
- * Return: error code.
- */
-int dsi_display_dev_init(struct dsi_display *display);
-
-/**
- * dsi_display_dev_deinit() - Desinitializes the display device
- * @display:        Handle to the display.
- *
- * All the resources acquired during device init will be released.
- *
- * Return: error code.
- */
-int dsi_display_dev_deinit(struct dsi_display *display);
-
-/**
- * dsi_display_bind() - Binds the display device to the DRM device
- * @display:       Handle to the display.
- * @dev:           Pointer to the DRM device.
- *
- * Return: error code.
- */
-int dsi_display_bind(struct dsi_display *display, struct drm_device *dev);
-
-/**
- * dsi_display_unbind() - Unbinds the display device from the DRM device
- * @display:         Handle to the display.
- *
- * Return: error code.
- */
-int dsi_display_unbind(struct dsi_display *display);
-
-/**
  * dsi_display_drm_bridge_init() - initializes DRM bridge object for DSI
  * @display:            Handle to the display.
  * @encoder:            Pointer to the encoder object which is connected to the
@@ -380,4 +332,5 @@
 int dsi_display_clock_gate(struct dsi_display *display, bool enable);
 int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
 
+int dsi_display_set_backlight(void *display, u32 bl_lvl);
 #endif /* _DSI_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 6e753f0..28cfa1f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -358,6 +358,23 @@
 }
 #endif
 
+int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl)
+{
+	int rc = 0;
+	struct dsi_backlight_config *bl = &panel->bl_config;
+
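+	/* only WLED-triggered backlights are handled; other types return -ENOTSUPP */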
+	switch (bl->type) {
+	case DSI_BACKLIGHT_WLED:
+		led_trigger_event(bl->wled, bl_lvl);
+		break;
+	default:
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
 static int dsi_panel_bl_register(struct dsi_panel *panel)
 {
 	int rc = 0;
@@ -1423,18 +1440,28 @@
 	if (rc) {
 		pr_debug("[%s] bl-min-level unspecified, defaulting to zero\n",
 			 panel->name);
-		panel->bl_config.min_level = 0;
+		panel->bl_config.bl_min_level = 0;
 	} else {
-		panel->bl_config.min_level = val;
+		panel->bl_config.bl_min_level = val;
 	}
 
 	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bl-max-level", &val);
 	if (rc) {
-		pr_debug("[%s] bl-max-level unspecified, defaulting to 255\n",
+		pr_debug("[%s] bl-max-level unspecified, defaulting to max level\n",
 			 panel->name);
-		panel->bl_config.max_level = 255;
+		panel->bl_config.bl_max_level = MAX_BL_LEVEL;
 	} else {
-		panel->bl_config.max_level = val;
+		panel->bl_config.bl_max_level = val;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-brightness-max-level",
+		&val);
+	if (rc) {
+		pr_debug("[%s] brightness-max-level unspecified, defaulting to 255\n",
+			 panel->name);
+		panel->bl_config.brightness_max_level = 255;
+	} else {
+		panel->bl_config.brightness_max_level = val;
 	}
 
 	if (panel->bl_config.type == DSI_BACKLIGHT_PWM) {
@@ -1834,8 +1861,6 @@
 		pr_err("[%s] failed to send DSI_CMD_SET_ON cmds, rc=%d\n",
 		       panel->name, rc);
 	}
-	/* TODO:  hack to enable backlight; */
-	led_trigger_event(panel->bl_config.wled, panel->bl_config.max_level);
 	mutex_unlock(&panel->panel_lock);
 	return rc;
 }
@@ -1873,8 +1898,6 @@
 
 	mutex_lock(&panel->panel_lock);
 
-	/* TODO:  hack to disable backlight; */
-	led_trigger_event(panel->bl_config.wled, 0x0);
 	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_OFF);
 	if (rc) {
 		pr_err("[%s] failed to send DSI_CMD_SET_PRE_OFF cmds, rc=%d\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 1009d94..4d21a4c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -27,6 +27,8 @@
 #include "dsi_ctrl_hw.h"
 #include "dsi_clk_pwr.h"
 
+#define MAX_BL_LEVEL 4096
+
 enum dsi_panel_rotation {
 	DSI_PANEL_ROTATE_NONE = 0,
 	DSI_PANEL_ROTATE_HV_FLIP,
@@ -101,8 +103,9 @@
 struct dsi_backlight_config {
 	enum dsi_backlight_type type;
 
-	u32 min_level;
-	u32 max_level;
+	u32 bl_min_level;
+	u32 bl_max_level;
+	u32 brightness_max_level;
 
 	int en_gpio;
 	/* PWM params */
@@ -113,6 +116,7 @@
 
 	/* WLED params */
 	struct led_trigger *wled;
+	struct backlight_device *bd;
 };
 
 struct dsi_reset_seq {
@@ -195,4 +199,5 @@
 
 int dsi_panel_post_unprepare(struct dsi_panel *panel);
 
+int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl);
 #endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8e30f04..cccd367 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -21,7 +21,6 @@
 #include "msm_fence.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
-#include "display_manager.h"
 #include "sde_wb.h"
 
 /*
@@ -39,6 +38,7 @@
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+
 	if (priv->fbdev)
 		drm_fb_helper_hotplug_event(priv->fbdev);
 }
@@ -63,6 +63,29 @@
 	return idx;
 }
 
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	int idx;
+
+	if (priv->num_mmus <= 0) {
+		dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus);
+		return;
+	}
+
+	idx = priv->num_mmus - 1;
+
+	/* only support reverse-order deallocation */
+	if (priv->mmus[idx] != mmu) {
+		dev_err(dev->dev, "unexpected mmu at idx %d\n", idx);
+		return;
+	}
+
+	--priv->num_mmus;
+	priv->mmus[idx] = NULL;
+}
+
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -116,6 +139,11 @@
 	return ptr;
 }
 
+void msm_iounmap(struct platform_device *pdev, void __iomem *addr)
+{
+	devm_iounmap(&pdev->dev, addr);
+}
+
 void msm_writel(u32 data, void __iomem *addr)
 {
 	if (reglog)
@@ -126,6 +154,7 @@
 u32 msm_readl(const void __iomem *addr)
 {
 	u32 val = readl(addr);
+
 	if (reglog)
 		printk(KERN_ERR "IO:R %p %08x\n", addr, val);
 	return val;
@@ -192,9 +221,8 @@
 
 static int msm_drm_uninit(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct drm_device *ddev = platform_get_drvdata(pdev);
-	struct msm_drm_private *priv = ddev->dev_private;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = dev->platformdev;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
 	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
@@ -244,12 +272,18 @@
 
 	if (priv->vram.paddr) {
 		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+
 		drm_mm_takedown(&priv->vram.mm);
 		dma_free_attrs(dev, priv->vram.size, NULL,
 			       priv->vram.paddr, attrs);
 	}
 
-	component_unbind_all(dev, ddev);
+	sde_evtlog_destroy();
+
+	sde_power_client_destroy(&priv->phandle, priv->pclient);
+	sde_power_resource_deinit(pdev, &priv->phandle);
+
+	component_unbind_all(dev->dev, dev);
 
 	msm_mdss_destroy(ddev);
 
@@ -314,7 +348,9 @@
 	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
 	if (node) {
 		struct resource r;
+
 		ret = of_address_to_resource(node, 0, &r);
+
 		of_node_put(node);
 		if (ret)
 			return ret;
@@ -364,7 +400,13 @@
 static int msm_component_bind_all(struct device *dev,
 				struct drm_device *drm_dev)
 {
-	return component_bind_all(dev, drm_dev);
+	int ret;
+
+	ret = component_bind_all(dev, drm_dev);
+	if (ret)
+		DRM_ERROR("component_bind_all failed: %d\n", ret);
+
+	return ret;
 }
 #else
 static int msm_component_bind_all(struct device *dev,
@@ -382,15 +424,6 @@
 	struct msm_kms *kms;
 	int ret;
 
-	ddev = drm_dev_alloc(drv, dev);
-	if (IS_ERR(ddev)) {
-		dev_err(dev, "failed to allocate drm_device\n");
-		return PTR_ERR(ddev);
-	}
-
-	platform_set_drvdata(pdev, ddev);
-	ddev->platformdev = pdev;
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		drm_dev_unref(ddev);
@@ -400,31 +433,40 @@
 	ddev->dev_private = priv;
 	priv->dev = ddev;
 
-	ret = msm_mdss_init(ddev);
-	if (ret) {
-		kfree(priv);
-		drm_dev_unref(ddev);
-		return ret;
-	}
+	priv->wq = alloc_ordered_workqueue("msm_drm", 0);
+	init_waitqueue_head(&priv->fence_event);
+	init_waitqueue_head(&priv->pending_crtcs_event);
+
+	INIT_LIST_HEAD(&priv->client_event_list);
+	INIT_LIST_HEAD(&priv->inactive_list);
+	INIT_LIST_HEAD(&priv->fence_cbs);
+	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+	init_kthread_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	spin_lock_init(&priv->vblank_ctrl.lock);
+
+	drm_mode_config_init(dev);
 
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
 	init_waitqueue_head(&priv->fence_event);
 	init_waitqueue_head(&priv->pending_crtcs_event);
 
-	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
+	ret = sde_power_resource_init(pdev, &priv->phandle);
+	if (ret) {
+		pr_err("sde power resource init failed\n");
+		goto fail;
+	}
 
-	drm_mode_config_init(ddev);
+	priv->pclient = sde_power_client_create(&priv->phandle, "sde");
+	if (IS_ERR_OR_NULL(priv->pclient)) {
+		pr_err("sde power client create failed\n");
+		ret = -EINVAL;
+		goto fail;
+	}
 
 	/* Bind all our sub-components: */
-	ret = component_bind_all(dev, ddev);
-	if (ret) {
-		msm_mdss_destroy(ddev);
-		kfree(priv);
-		drm_dev_unref(ddev);
+	ret = msm_component_bind_all(dev->dev, dev);
+	if (ret)
 		return ret;
 	}
 
@@ -432,7 +474,11 @@
 	if (ret)
 		goto fail;
 
-	msm_gem_shrinker_init(ddev);
+	ret = sde_evtlog_init(dev->primary->debugfs_root);
+	if (ret) {
+		dev_err(dev->dev, "failed to init evtlog: %d\n", ret);
+		goto fail;
+	}
 
 	switch (get_mdp_ver(pdev)) {
 	case KMS_MDP4:
@@ -456,7 +502,8 @@
 		 * and (for example) use dmabuf/prime to share buffers with
 		 * imx drm driver on iMX5
 		 */
-		dev_err(dev, "failed to load kms\n");
+		priv->kms = NULL;
+		dev_err(dev->dev, "failed to load kms\n");
 		ret = PTR_ERR(kms);
 		goto fail;
 	}
@@ -722,6 +769,7 @@
 	struct drm_device *dev = arg;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
+
 	BUG_ON(!kms);
 	return kms->funcs->irq(kms);
 }
@@ -730,6 +778,7 @@
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
+
 	BUG_ON(!kms);
 	kms->funcs->irq_preinstall(kms);
 }
@@ -738,6 +787,7 @@
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
+
 	BUG_ON(!kms);
 	return kms->funcs->irq_postinstall(kms);
 }
@@ -746,6 +796,7 @@
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
+
 	BUG_ON(!kms);
 	kms->funcs->irq_uninstall(kms);
 }
@@ -754,6 +805,7 @@
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
+
 	if (!kms)
 		return -ENXIO;
 	DBG("dev=%p, crtc=%u", dev, pipe);
@@ -764,6 +816,7 @@
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
+
 	if (!kms)
 		return;
 	DBG("dev=%p, crtc=%u", dev, pipe);
@@ -1040,7 +1093,13 @@
 
 static int msm_drm_bind(struct device *dev)
 {
-	return drm_platform_init(&msm_driver, to_platform_device(dev));
+	int ret;
+
+	ret = drm_platform_init(&msm_driver, to_platform_device(dev));
+	if (ret)
+		DRM_ERROR("drm_platform_init failed: %d\n", ret);
+
+	return ret;
 }
 
 static void msm_drm_unbind(struct device *dev)
@@ -1136,48 +1195,11 @@
 
 static int compare_name_mdp(struct device *dev, void *data)
 {
-	return (strstr(dev_name(dev), "mdp") != NULL);
-}
-
-static int add_display_components(struct device *dev,
-				  struct component_match **matchptr)
-{
-	struct device *mdp_dev;
 	int ret;
 
-	/*
-	 * MDP5 based devices don't have a flat hierarchy. There is a top level
-	 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
-	 * children devices, find the MDP5 node, and then add the interfaces
-	 * to our components list.
-	 */
-	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
-		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
-		if (ret) {
-			dev_err(dev, "failed to populate children devices\n");
-			return ret;
-		}
-
-		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
-		if (!mdp_dev) {
-			dev_err(dev, "failed to find MDSS MDP node\n");
-			of_platform_depopulate(dev);
-			return -ENODEV;
-		}
-
-		put_device(mdp_dev);
-
-		/* add the MDP component itself */
-		component_match_add(dev, matchptr, compare_of,
-				    mdp_dev->of_node);
-	} else {
-		/* MDP4 */
-		mdp_dev = dev;
-	}
-
-	ret = add_components_mdp(mdp_dev, matchptr);
+	ret = component_master_add_with_match(dev, &msm_drm_ops, match);
 	if (ret)
-		of_platform_depopulate(dev);
+		DRM_ERROR("component add match failed: %d\n", ret);
 
 	return ret;
 }
@@ -1230,67 +1252,43 @@
 {
 	int ret;
 	struct component_match *match = NULL;
-	int ret;
+
+#ifdef CONFIG_OF
+	add_components(&pdev->dev, &match, "connectors");
+	add_components(&pdev->dev, &match, "gpus");
+#else
+	/* For non-DT case, it kinda sucks.  We don't actually have a way
+	 * to know whether or not we are waiting for certain devices (or if
+	 * they are simply not present).  But for non-DT we only need to
+	 * care about apq8064/apq8060/etc (all mdp4/a3xx):
+	 */
+	static const char * const devnames[] = {
+			"hdmi_msm.0", "kgsl-3d0.0",
+	};
+	int i;
+
+	DBG("Adding components..");
+
+	for (i = 0; i < ARRAY_SIZE(devnames); i++) {
+		struct device *dev;
 
 	ret = add_display_components(&pdev->dev, &match);
 	if (ret)
 		return ret;
 
-	ret = add_gpu_components(&pdev->dev, &match);
-	if (ret)
-		return ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->wq = alloc_ordered_workqueue("msm_drm", 0);
-	init_waitqueue_head(&priv->fence_event);
-	init_waitqueue_head(&priv->pending_crtcs_event);
-
-	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->fence_cbs);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
-
-	platform_set_drvdata(pdev, priv);
-
-	ret = sde_power_resource_init(pdev, &priv->phandle);
-	if (ret) {
-		pr_err("sde power resource init failed\n");
-		goto hw_setup_failure;
+		component_match_add(&pdev->dev, &match, compare_dev, dev);
 	}
-
-	priv->pclient = sde_power_client_create(&priv->phandle, "sde");
-	if (IS_ERR_OR_NULL(priv->pclient)) {
-		pr_err("sde power client create failed\n");
-		ret = -EINVAL;
-		goto client_create_err;
-	}
-
+#endif
 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-
-	return msm_add_master_component(&pdev->dev, match);
-
-client_create_err:
-	sde_power_resource_deinit(pdev, &priv->phandle);
-hw_setup_failure:
-	kfree(priv);
+	ret = msm_add_master_component(&pdev->dev, match);
 
 	return ret;
 }
 
 static int msm_pdev_remove(struct platform_device *pdev)
 {
-	struct drm_device *drm_dev = platform_get_drvdata(pdev);
-	struct msm_drm_private *priv = drm_dev->dev_private;
-
-	component_master_del(&pdev->dev, &msm_drm_ops);
-	of_platform_depopulate(&pdev->dev);
-
 	msm_drm_unbind(&pdev->dev);
-	sde_power_resource_deinit(pdev, &priv->phandle);
+	component_master_del(&pdev->dev, &msm_drm_ops);
 	return 0;
 }
 
@@ -1325,8 +1323,6 @@
 static int __init msm_drm_register(void)
 {
 	DBG("init");
-	msm_mdp_register();
-	display_manager_register();
 	msm_dsi_register();
 	msm_edp_register();
 	msm_hdmi_register();
@@ -1342,7 +1338,6 @@
 	adreno_unregister();
 	msm_edp_unregister();
 	msm_dsi_unregister();
-	display_manager_unregister();
 }
 
 module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e5adb11..c2690ff 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -32,7 +32,7 @@
 #include <linux/types.h>
 #include <linux/of_graph.h>
 #include <linux/of_device.h>
-#include <linux/mdss_io_util.h>
+#include <linux/sde_io_util.h>
 #include <asm/sizes.h>
 
 #include <drm/drmP.h>
@@ -44,7 +44,6 @@
 #include <drm/msm_drm.h>
 #include <drm/drm_gem.h>
 
-#include "msm_evtlog.h"
 #include "sde_power_handle.h"
 
 #define GET_MAJOR_REV(rev)		((rev) >> 28)
@@ -79,8 +78,15 @@
 enum msm_mdp_plane_property {
 	/* blob properties, always put these first */
 	PLANE_PROP_SCALER_V1,
+	PLANE_PROP_SCALER_V2,
 	PLANE_PROP_CSC_V1,
 	PLANE_PROP_INFO,
+	PLANE_PROP_SCALER_LUT_ED,
+	PLANE_PROP_SCALER_LUT_CIR,
+	PLANE_PROP_SCALER_LUT_SEP,
+	PLANE_PROP_SKIN_COLOR,
+	PLANE_PROP_SKY_COLOR,
+	PLANE_PROP_FOLIAGE_COLOR,
 
 	/* # of blob properties */
 	PLANE_PROP_BLOBCOUNT,
@@ -92,6 +98,10 @@
 	PLANE_PROP_H_DECIMATE,
 	PLANE_PROP_V_DECIMATE,
 	PLANE_PROP_INPUT_FENCE,
+	PLANE_PROP_HUE_ADJUST,
+	PLANE_PROP_SATURATION_ADJUST,
+	PLANE_PROP_VALUE_ADJUST,
+	PLANE_PROP_CONTRAST_ADJUST,
 
 	/* enum/bitmask properties */
 	PLANE_PROP_ROTATION,
@@ -208,7 +218,20 @@
 	enum msm_display_compression compression;
 };
 
-struct display_manager;
+/**
+ * struct msm_drm_event - defines custom event notification struct
+ * @base: base object required for event notification by DRM framework.
+ * @event: event object required for event notification by DRM framework.
+ * @info: contains information about the DRM object for which events have
+ *        been requested.
+ * @data: memory location which contains response payload for event.
+ */
+struct msm_drm_event {
+	struct drm_pending_event base;
+	struct drm_event event;
+	struct drm_msm_event_req info;
+	u8 data[];
+};
 
 struct msm_drm_private {
 
@@ -239,9 +262,6 @@
 	/* DSI is shared by mdp4 and mdp5 */
 	struct msm_dsi *dsi[2];
 
-	/* Display manager for SDE driver */
-	struct display_manager *dm;
-
 	/* when we have more than one 'msm_gpu' these need to be an array: */
 	struct msm_gpu *gpu;
 	struct msm_file_private *lastctx;
@@ -303,28 +323,10 @@
 
 	struct msm_vblank_ctrl vblank_ctrl;
 
-	/* task holding struct_mutex.. currently only used in submit path
-	 * to detect and reject faults from copy_from_user() for submit
-	 * ioctl.
-	 */
-	struct task_struct *struct_mutex_task;
-
-	struct msm_evtlog evtlog;
+	/* list of clients waiting for events */
+	struct list_head client_event_list;
 };
 
-/* Helper macro for accessing msm_drm_private's event log */
-#define MSM_EVTMSG(dev, msg, x, y)  do {                                       \
-		if ((dev) && ((struct drm_device *)(dev))->dev_private)        \
-			msm_evtlog_sample(&((struct msm_drm_private *)         \
-					((struct drm_device *)                 \
-					(dev))->dev_private)->evtlog, __func__,\
-					(msg), (uint64_t)(x), (uint64_t)(y),   \
-					__LINE__);                             \
-	} while (0)
-
-/* Helper macro for accessing msm_drm_private's event log */
-#define MSM_EVT(dev, x, y) MSM_EVTMSG((dev), 0, (x), (y))
-
 struct msm_format {
 	uint32_t pixel_format;
 };
@@ -349,6 +351,7 @@
 		struct drm_atomic_state *state, bool nonblock);
 
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
 void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
@@ -474,6 +477,7 @@
 
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		const char *dbgname);
+void msm_iounmap(struct platform_device *dev, void __iomem *addr);
 void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
 
diff --git a/drivers/gpu/drm/msm/msm_evtlog.c b/drivers/gpu/drm/msm/msm_evtlog.c
index 0562be4..dbe9b88 100644
--- a/drivers/gpu/drm/msm/msm_evtlog.c
+++ b/drivers/gpu/drm/msm/msm_evtlog.c
@@ -19,6 +19,8 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 
+#include "sde_trace.h"
+
 #define SIZE_MASK(x) (x - 1)
 
 static int msm_evtlog_debugfs_dump(struct seq_file *s, void *data)
@@ -169,4 +171,6 @@
 	log->events[i].val2 = val2;
 	log->events[i].line = line;
 	log->events[i].pid = current->pid;
+
+	trace_sde_evtlog(func, line, val1, val2);
 }
diff --git a/drivers/gpu/drm/msm/msm_evtlog.h b/drivers/gpu/drm/msm/msm_evtlog.h
deleted file mode 100644
index 8351289..0000000
--- a/drivers/gpu/drm/msm/msm_evtlog.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef MSM_MSM_EVTLOG_H_
-#define MSM_MSM_EVTLOG_H_
-
-#include <linux/ktime.h>
-#include <linux/atomic.h>
-#include <linux/dcache.h>
-
-/**
- * struct msm_evtlog_evt - Event log entry
- * @ktime:     Timestamp of event
- * @func:      Calling function name
- * @msg:       User provided string
- * @val1:      User provided value
- * @val2:      User provided value
- * @line:      Line number of caller
- * @pid:       Process id of logger
- */
-struct msm_evtlog_evt {
-	ktime_t ktime;
-	const char *func;
-	const char *msg;
-	uint64_t val1;
-	uint64_t val2;
-	uint32_t line;
-	uint32_t pid;
-};
-
-/**
- * struct msm_evtlog - current driver state information
- * @events:    Pointer to dynamically allocated event log buffer
- * @cnt:       Atomic number of events since clear. Can be used to calculate
- *             the current index. Note: The count does not wrap.
- *             Reset the event log by setting to zero.
- *             Used for lock-less producer synchronization.
- * @size:      Size of events array. Must be power of 2 to facilitate fast
- *             increments by using a bitmask to get rollover.
- * @dentry:    Filesystem entry of debugfs registration
- */
-struct msm_evtlog {
-	struct msm_evtlog_evt *events;
-	atomic_t cnt;
-	unsigned long size;
-	struct dentry *dentry;
-};
-
-/**
- * msm_evtlog_init() - Create an event log, registered with debugfs.
- * @log:     Event log handle
- * @size:    Max # of events in buffer. Will be rounded up to power of 2.
- * @parent:  Parent directory entry for debugfs registration
- *
- * Return: error code.
- */
-int msm_evtlog_init(struct msm_evtlog *log, int size, struct dentry *parent);
-
-/**
- * msm_evtlog_destroy() - Destroy event log
- * @log:            Event log handle
- *
- * Unregisters debugfs node and frees memory.
- * Caller needs to make sure that log sampling has stopped.
- */
-void msm_evtlog_destroy(struct msm_evtlog *log);
-
-/**
- * msm_evtlog_sample() - Add entry to the event log
- * @evtlog:            Event log handle
- * @func:              Calling function name
- * @msg:               User provided string
- * @val1:              User provided value
- * @val2:              User provided value
- * @line:              Line number of caller
- */
-void msm_evtlog_sample(
-		struct msm_evtlog *log,
-		const char *func,
-		const char *msg,
-		uint64_t val1,
-		uint64_t val2,
-		uint32_t line);
-
-#endif /* MSM_MSM_EVTLOG_H_ */
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 3937521..0b23f5c 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -82,7 +82,20 @@
 
 static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
 {
-	DBG("detaching");
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+	if (!client) {
+		pr_err("undefined smmu client\n");
+		return;
+	}
+
+	if (!client->domain_attached)
+		return;
+
+	arm_iommu_detach_device(client->dev);
+	client->domain_attached = false;
+	dev_dbg(client->dev, "iommu domain detached\n");
 }
 
 static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -193,7 +206,8 @@
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct platform_device *pdev = to_platform_device(smmu->client_dev);
 
-	platform_device_unregister(pdev);
+	if (smmu->client_dev)
+		platform_device_unregister(pdev);
 	kfree(smmu);
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_backlight.c b/drivers/gpu/drm/msm/sde/sde_backlight.c
new file mode 100644
index 0000000..9034eeb
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_backlight.c
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_connector.h"
+#include <linux/backlight.h>
+#include "dsi_drm.h"
+
+#define SDE_BRIGHT_TO_BL(out, v, bl_max, max_bright) do {\
+	out = (2 * (v) * (bl_max) + (max_bright));\
+	do_div(out, 2 * (max_bright));\
+} while (0)
+
+static int sde_backlight_device_update_status(struct backlight_device *bd)
+{
+	int brightness;
+	struct drm_connector *connector;
+	struct dsi_display *display;
+	struct sde_connector *c_conn;
+	int bl_lvl;
+
+	brightness = bd->props.brightness;
+
+	if ((bd->props.power != FB_BLANK_UNBLANK) ||
+			(bd->props.state & BL_CORE_FBBLANK) ||
+			(bd->props.state & BL_CORE_SUSPENDED))
+		brightness = 0;
+
+	connector = bl_get_data(bd);
+	c_conn = to_sde_connector(connector);
+	display = (struct dsi_display *) c_conn->display;
+	if (brightness > display->panel->bl_config.bl_max_level)
+		brightness = display->panel->bl_config.bl_max_level;
+
+	/*
+	 * Map UI brightness into the driver backlight level, with rounding.
+	 */
+	SDE_BRIGHT_TO_BL(bl_lvl, brightness,
+			display->panel->bl_config.bl_max_level,
+			display->panel->bl_config.brightness_max_level);
+
+	if (!bl_lvl && brightness)
+		bl_lvl = 1;
+
+	if (c_conn->ops.set_backlight)
+		c_conn->ops.set_backlight(c_conn->display, bl_lvl);
+
+	return 0;
+}
+
+static int sde_backlight_device_get_brightness(struct backlight_device *bd)
+{
+	return 0;
+}
+
+static const struct backlight_ops sde_backlight_device_ops = {
+	.update_status = sde_backlight_device_update_status,
+	.get_brightness = sde_backlight_device_get_brightness,
+};
+
+int sde_backlight_setup(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct dsi_display *display;
+	struct dsi_backlight_config *bl_config;
+
+	if (!connector)
+		return -EINVAL;
+
+	c_conn = to_sde_connector(connector);
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_RAW;
+	props.power = FB_BLANK_UNBLANK;
+
+	switch (c_conn->connector_type) {
+	case DRM_MODE_CONNECTOR_DSI:
+		display = (struct dsi_display *) c_conn->display;
+		bl_config = &display->panel->bl_config;
+		props.max_brightness = bl_config->brightness_max_level;
+		props.brightness = bl_config->brightness_max_level;
+		bd = backlight_device_register("sde-backlight",
+				connector->kdev,
+				connector,
+				&sde_backlight_device_ops, &props);
+		if (IS_ERR(bd)) {
+			pr_err("Failed to register backlight: %ld\n",
+					    PTR_ERR(bd));
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
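
For reference, the SDE_BRIGHT_TO_BL macro used above is a round-to-nearest
integer scaling from the UI brightness range into the panel backlight range.
A standalone sketch of the same arithmetic (the function name and sample
values are illustrative, not part of the patch):

	/* bl = round(v * bl_max / max_bright), done in integer math */
	static u32 brightness_to_bl(u32 v, u32 bl_max, u32 max_bright)
	{
		return (2 * v * bl_max + max_bright) / (2 * max_bright);
	}
	/* e.g. v = 128, bl_max = 4095, max_bright = 255 -> 2056 */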
diff --git a/drivers/gpu/drm/msm/sde/sde_backlight.h b/drivers/gpu/drm/msm/sde/sde_backlight.h
new file mode 100644
index 0000000..1ea1305
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_backlight.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_BACKLIGHT_H_
+#define _SDE_BACKLIGHT_H_
+
+int sde_backlight_setup(struct drm_connector *connector);
+
+#endif /* _SDE_BACKLIGHT_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 0227b59..4931e3c 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -32,34 +32,26 @@
 	struct list_head feature_list;
 	struct list_head active_list;
 	struct list_head dirty_list;
-	void (*dspp_feature_op)(struct sde_hw_dspp *ctx, void *cfg);
-	void (*lm_feature_op)(struct sde_hw_mixer *mixer, void *cfg);
+	bool is_dspp_feature;
 };
 
 struct sde_cp_prop_attach {
 	struct drm_crtc *crtc;
 	struct drm_property *prop;
 	struct sde_cp_node *prop_node;
-	const struct sde_pp_blk *pp_blk;
 	u32 feature;
-	void *ops;
 	uint64_t val;
 };
 
-static void dspp_pcc_install_property(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp);
+static void dspp_pcc_install_property(struct drm_crtc *crtc);
 
-static void dspp_hsic_install_property(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp);
+static void dspp_hsic_install_property(struct drm_crtc *crtc);
 
-static void dspp_ad_install_property(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp);
+static void dspp_ad_install_property(struct drm_crtc *crtc);
 
-static void dspp_vlut_install_property(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp);
+static void dspp_vlut_install_property(struct drm_crtc *crtc);
 
-typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp);
+typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
 
 static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
 
@@ -71,13 +63,11 @@
 	func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
 } while (0)
 
-typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc,
-					struct sde_hw_mixer *hw_mixer);
+typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
 
 static lm_prop_install_func_t lm_prop_install_func[SDE_MIXER_MAX];
 
-static void lm_gc_install_property(struct drm_crtc *crtc,
-				     struct sde_hw_mixer *hw_mixer);
+static void lm_gc_install_property(struct drm_crtc *crtc);
 
 #define setup_lm_prop_install_funcs(func) \
 	(func[SDE_MIXER_GC] = lm_gc_install_property)
@@ -110,14 +100,12 @@
 	SDE_CP_CRTC_MAX_FEATURES,
 };
 
-#define INIT_PROP_ATTACH(p, crtc, prop, node, blk, feature, func, val) \
+#define INIT_PROP_ATTACH(p, crtc, prop, node, feature, val) \
 	do { \
 		(p)->crtc = crtc; \
 		(p)->prop = prop; \
 		(p)->prop_node = node; \
-		(p)->pp_blk = blk; \
 		(p)->feature = feature; \
-		(p)->ops = func; \
 		(p)->val = val; \
 	} while (0)
 
@@ -281,14 +269,6 @@
 	return ret;
 }
 
-static int sde_cp_crtc_get_mixer_idx(struct sde_crtc *sde_crtc)
-{
-	if (sde_crtc->num_mixers)
-		return sde_crtc->mixers[0].hw_lm->idx;
-	else
-		return -EINVAL;
-}
-
 static struct sde_kms *get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -310,12 +290,11 @@
 	prop_attach->prop_node->property_id = prop_attach->prop->base.id;
 	prop_attach->prop_node->prop_flags = prop_attach->prop->flags;
 	prop_attach->prop_node->feature = prop_attach->feature;
-	prop_attach->prop_node->pp_blk = prop_attach->pp_blk;
 
 	if (prop_attach->feature < SDE_CP_CRTC_DSPP_MAX)
-		prop_attach->prop_node->dspp_feature_op = prop_attach->ops;
+		prop_attach->prop_node->is_dspp_feature = true;
 	else
-		prop_attach->prop_node->lm_feature_op = prop_attach->ops;
+		prop_attach->prop_node->is_dspp_feature = false;
 
 	list_add(&prop_attach->prop_node->feature_list,
 		 &sde_crtc->feature_list);
@@ -375,15 +354,14 @@
 		priv->cp_property[feature] = prop;
 	}
 
-	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node, NULL,
-				feature, NULL, val);
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
 static void sde_cp_crtc_install_range_property(struct drm_crtc *crtc,
 					     char *name,
-					     const struct sde_pp_blk *pp_blk,
-					     u32 feature, void *ops,
+					     u32 feature,
 					     uint64_t min, uint64_t max,
 					     uint64_t val)
 {
@@ -415,15 +393,14 @@
 		priv->cp_property[feature] = prop;
 	}
 
-	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node, pp_blk,
-				feature, ops, val);
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
 
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
 static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
-					     const struct sde_pp_blk *pp_blk,
-					     u32 feature, void *ops)
+					     u32 feature)
 {
 	struct drm_property *prop;
 	struct sde_cp_node *prop_node = NULL;
@@ -455,44 +432,126 @@
 		priv->cp_property[feature] = prop;
 	}
 
-	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node, pp_blk,
-				feature, ops, val);
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
 
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
-
 static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 				   struct sde_crtc *sde_crtc)
 {
 	struct sde_hw_cp_cfg hw_cfg;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp;
 	u32 num_mixers = sde_crtc->num_mixers;
 	int i = 0;
-	bool is_dspp = true;
 	bool feature_enabled = false;
+	int ret = 0;
 
-	if (!prop_node->dspp_feature_op && !prop_node->lm_feature_op) {
-		DRM_ERROR("ops not set for dspp/lm\n");
-		return;
-	}
-
-	is_dspp = !prop_node->lm_feature_op;
 	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
 
-	for (i = 0; i < num_mixers; i++) {
-		if (is_dspp) {
-			if (!sde_crtc->mixers[i].hw_dspp)
+	for (i = 0; i < num_mixers && !ret; i++) {
+		hw_lm = sde_crtc->mixers[i].hw_lm;
+		hw_dspp = sde_crtc->mixers[i].hw_dspp;
+
+		switch (prop_node->feature) {
+		case SDE_CP_CRTC_DSPP_VLUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_vlut) {
+				ret = -EINVAL;
 				continue;
-			prop_node->dspp_feature_op(sde_crtc->mixers[i].hw_dspp,
-						   &hw_cfg);
-		} else {
-			if (!sde_crtc->mixers[i].hw_lm)
+			}
+			hw_dspp->ops.setup_vlut(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_PCC:
+			if (!hw_dspp || !hw_dspp->ops.setup_pcc) {
+				ret = -EINVAL;
 				continue;
-			prop_node->lm_feature_op(sde_crtc->mixers[i].hw_lm,
-						 &hw_cfg);
+			}
+			hw_dspp->ops.setup_pcc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_IGC:
+			if (!hw_dspp || !hw_dspp->ops.setup_igc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_igc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_GC:
+			if (!hw_dspp || !hw_dspp->ops.setup_gc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_gc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_HUE:
+			if (!hw_dspp || !hw_dspp->ops.setup_hue) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_hue(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_SAT:
+			if (!hw_dspp || !hw_dspp->ops.setup_sat) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_sat(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_VAL:
+			if (!hw_dspp || !hw_dspp->ops.setup_val) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_val(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_CONT:
+			if (!hw_dspp || !hw_dspp->ops.setup_cont) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_cont(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_MEMCOLOR:
+			if (!hw_dspp || !hw_dspp->ops.setup_pa_memcolor) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_pa_memcolor(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_SIXZONE:
+			if (!hw_dspp || !hw_dspp->ops.setup_sixzone) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_sixzone(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_GAMUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_gamut) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_gamut(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_LM_GC:
+			if (!hw_lm || !hw_lm->ops.setup_gc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
 		}
 	}
 
+	if (ret) {
+		DRM_ERROR("failed to %s feature %d\n",
+			((feature_enabled) ? "enable" : "disable"),
+			prop_node->feature);
+		return;
+	}
+
 	if (feature_enabled) {
 		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
 				 prop_node->property_id);
@@ -543,7 +602,7 @@
 							dirty_list) {
 		sde_cp_crtc_setfeature(prop_node, sde_crtc);
 		/* Set the flush flag to true */
-		if (prop_node->dspp_feature_op)
+		if (prop_node->is_dspp_feature)
 			set_dspp_flush = true;
 		else
 			set_lm_flush = true;
@@ -573,10 +632,8 @@
 	struct sde_crtc *sde_crtc = NULL;
 	struct sde_mdss_cfg *catalog = NULL;
 	unsigned long features = 0;
-	int idx = 0, i = 0;
+	int i = 0;
 	struct msm_drm_private *priv;
-	struct sde_hw_dspp *hw_dspp = NULL;
-	struct sde_hw_mixer *hw_mixer = NULL;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		DRM_ERROR("invalid crtc %pK dev %pK\n",
@@ -591,7 +648,7 @@
 	}
 
 	kms = get_kms(crtc);
-	if (!kms || !kms->catalog || !sde_crtc) {
+	if (!kms || !kms->catalog) {
 		DRM_ERROR("invalid sde kms %pK catalog %pK sde_crtc %pK\n",
 		 kms, ((kms) ? kms->catalog : NULL), sde_crtc);
 		return;
@@ -606,12 +663,6 @@
 		return;
 
 	catalog = kms->catalog;
-	idx = sde_cp_crtc_get_mixer_idx(sde_crtc);
-	if (idx < 0 || idx >= catalog->mixer_count) {
-		DRM_ERROR("invalid idx %d\n", idx);
-		return;
-	}
-
 	priv = crtc->dev->dev_private;
 	/**
 	 * DSPP/LM properties are global to all the CRTCS.
@@ -627,36 +678,29 @@
 	if (!priv->cp_property)
 		return;
 
-	if (idx >= catalog->dspp_count)
+	if (!catalog->dspp_count)
 		goto lm_property;
 
 	/* Check for all the DSPP properties and attach it to CRTC */
-	hw_dspp = sde_crtc->mixers[0].hw_dspp;
-	features = (hw_dspp) ? hw_dspp->cap->features : 0;
-
-	if (!hw_dspp || !hw_dspp->cap->sblk || !features)
-		goto lm_property;
-
+	features = catalog->dspp[0].features;
 	for (i = 0; i < SDE_DSPP_MAX; i++) {
 		if (!test_bit(i, &features))
 			continue;
 		if (dspp_prop_install_func[i])
-			dspp_prop_install_func[i](crtc, hw_dspp);
+			dspp_prop_install_func[i](crtc);
 	}
 
 lm_property:
-	/* Check for all the LM properties and attach it to CRTC */
-	hw_mixer = sde_crtc->mixers[0].hw_lm;
-	features = (hw_mixer) ? hw_mixer->cap->features : 0;
-
-	if (!hw_mixer || !hw_mixer->cap->sblk || !features)
+	if (!catalog->mixer_count)
 		return;
 
+	/* Check for all the LM properties and attach it to CRTC */
+	features = catalog->mixer[0].features;
 	for (i = 0; i < SDE_MIXER_MAX; i++) {
 		if (!test_bit(i, &features))
 			continue;
 		if (lm_prop_install_func[i])
-			lm_prop_install_func[i](crtc, hw_mixer);
+			lm_prop_install_func[i](crtc);
 	}
 }
 
@@ -666,7 +710,7 @@
 {
 	struct sde_cp_node *prop_node = NULL;
 	struct sde_crtc *sde_crtc = NULL;
-	int ret = 0;
+	int ret = 0, i = 0, dspp_cnt, lm_cnt;
 	u8 found = 0;
 
 	if (!crtc || !property) {
@@ -689,7 +733,36 @@
 
 	if (!found)
 		return 0;
+	/*
+	 * sde_crtc is a virtual object; ensure that hardware has been attached
+	 * to the crtc. Check the LM and DSPP counts based on whether the
+	 * feature is a DSPP or LM feature.
+	 */
+	if (!sde_crtc->num_mixers ||
+	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
+		DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
+			sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+		return -EINVAL;
+	}
 
+	dspp_cnt = 0;
+	lm_cnt = 0;
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (sde_crtc->mixers[i].hw_dspp)
+			dspp_cnt++;
+		if (sde_crtc->mixers[i].hw_lm)
+			lm_cnt++;
+	}
+
+	if (prop_node->is_dspp_feature && dspp_cnt < sde_crtc->num_mixers) {
+		DRM_ERROR("invalid dspp cnt %d mixer cnt %d\n", dspp_cnt,
+			sde_crtc->num_mixers);
+		return -EINVAL;
+	} else if (lm_cnt < sde_crtc->num_mixers) {
+		DRM_ERROR("invalid lm cnt %d mixer cnt %d\n", lm_cnt,
+			sde_crtc->num_mixers);
+		return -EINVAL;
+	}
 	/* remove the property from dirty list */
 	list_del_init(&prop_node->dirty_list);
 
@@ -797,21 +870,23 @@
 	/* placeholder for operations needed during resume */
 }
 
-static void dspp_pcc_install_property(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp)
+static void dspp_pcc_install_property(struct drm_crtc *crtc)
 {
 	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
 	u32 version;
 
-	version = hw_dspp->cap->sblk->pcc.version >> 16;
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+
+	version = catalog->dspp[0].sblk->pcc.version >> 16;
 	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
 		"SDE_DSPP_PCC_V", version);
 	switch (version) {
 	case 1:
 		sde_cp_crtc_create_blob_property(crtc, feature_name,
-					&hw_dspp->cap->sblk->pcc,
-					SDE_CP_CRTC_DSPP_PCC,
-					hw_dspp->ops.setup_pcc);
+					SDE_CP_CRTC_DSPP_PCC);
 		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
@@ -819,21 +894,22 @@
 	}
 }
 
-static void dspp_hsic_install_property(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp)
+static void dspp_hsic_install_property(struct drm_crtc *crtc)
 {
 	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
 	u32 version;
 
-	version = hw_dspp->cap->sblk->hsic.version >> 16;
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->hsic.version >> 16;
 	switch (version) {
 	case 1:
 		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
 			"SDE_DSPP_HUE_V", version);
 		sde_cp_crtc_install_range_property(crtc, feature_name,
-			&hw_dspp->cap->sblk->hsic,
-			SDE_CP_CRTC_DSPP_HUE, hw_dspp->ops.setup_hue,
-			0, U32_MAX, 0);
+			SDE_CP_CRTC_DSPP_HUE, 0, U32_MAX, 0);
 		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
@@ -841,21 +917,22 @@
 	}
 }
 
-static void dspp_vlut_install_property(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp)
+static void dspp_vlut_install_property(struct drm_crtc *crtc)
 {
 	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
 	u32 version;
 
-	version = hw_dspp->cap->sblk->vlut.version >> 16;
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->vlut.version >> 16;
 	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
 		"SDE_DSPP_VLUT_V", version);
 	switch (version) {
 	case 1:
 		sde_cp_crtc_install_range_property(crtc, feature_name,
-			&hw_dspp->cap->sblk->vlut,
-			SDE_CP_CRTC_DSPP_VLUT, hw_dspp->ops.setup_vlut,
-			0, U64_MAX, 0);
+			SDE_CP_CRTC_DSPP_VLUT, 0, U64_MAX, 0);
 		sde_cp_create_local_blob(crtc,
 			SDE_CP_CRTC_DSPP_VLUT,
 			sizeof(struct drm_msm_pa_vlut));
@@ -866,13 +943,16 @@
 	}
 }
 
-static void dspp_ad_install_property(struct drm_crtc *crtc,
-					struct sde_hw_dspp *hw_dspp)
+static void dspp_ad_install_property(struct drm_crtc *crtc)
 {
 	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
 	u32 version;
 
-	version = hw_dspp->cap->sblk->ad.version >> 16;
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->ad.version >> 16;
 	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
 		"SDE_DSPP_AD_V", version);
 	switch (version) {
@@ -886,21 +966,22 @@
 	}
 }
 
-static void lm_gc_install_property(struct drm_crtc *crtc,
-				     struct sde_hw_mixer *hw_mixer)
+static void lm_gc_install_property(struct drm_crtc *crtc)
 {
 	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
 	u32 version;
 
-	version = hw_mixer->cap->sblk->gc.version >> 16;
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->mixer[0].sblk->gc.version >> 16;
 	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
 		 "SDE_LM_GC_V", version);
 	switch (version) {
 	case 1:
 		sde_cp_crtc_create_blob_property(crtc, feature_name,
-			&hw_mixer->cap->sblk->gc,
-			SDE_CP_CRTC_LM_GC,
-			hw_mixer->ops.setup_gc);
+			SDE_CP_CRTC_LM_GC);
 		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index dbe52a2..9fa63f8 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -1,4 +1,3 @@
-
 /* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,6 +15,18 @@
 #define _SDE_COLOR_PROCESSING_H
 #include <drm/drm_crtc.h>
 
+/*
+ * PA MEMORY COLOR types
+ * @MEMCOLOR_SKIN          Skin memory color type
+ * @MEMCOLOR_SKY           Sky memory color type
+ * @MEMCOLOR_FOLIAGE       Foliage memory color type
+ */
+enum sde_memcolor_type {
+	MEMCOLOR_SKIN = 0,
+	MEMCOLOR_SKY,
+	MEMCOLOR_FOLIAGE
+};
+
 /**
  * sde_cp_crtc_init(): Initialize color processing lists for a crtc.
  *                     Should be called during crtc initialization.
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 056cf60..ac9997c 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -15,6 +15,7 @@
 
 #include "sde_kms.h"
 #include "sde_connector.h"
+#include "sde_backlight.h"
 
 static const struct drm_prop_enum_list e_topology_name[] = {
 	{SDE_RM_TOPOLOGY_UNKNOWN,	"sde_unknown"},
@@ -518,7 +519,8 @@
 			"conn%u",
 			c_conn->base.base.id);
 
-	rc = sde_fence_init(dev, &c_conn->retire_fence, c_conn->name);
+	rc = sde_fence_init(&c_conn->retire_fence, c_conn->name,
+			c_conn->base.base.id);
 	if (rc) {
 		SDE_ERROR("failed to init fence, %d\n", rc);
 		goto error_cleanup_conn;
@@ -536,6 +538,14 @@
 		goto error_unregister_conn;
 	}
 
+	if (c_conn->ops.set_backlight) {
+		rc = sde_backlight_setup(&c_conn->base);
+		if (rc) {
+			pr_err("failed to setup backlight, rc=%d\n", rc);
+			goto error_unregister_conn;
+		}
+	}
+
 	/* create properties */
 	msm_property_init(&c_conn->property_info, &c_conn->base.base, dev,
 			priv->conn_property, c_conn->property_data,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index cca934d..9580282 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -111,6 +111,8 @@
 	 * Returns: Zero on success
 	 */
 	int (*get_info)(struct msm_display_info *info, void *display);
+
+	int (*set_backlight)(void *display, u32 bl_lvl);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 3aa4c65..502a7fa 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -12,6 +12,7 @@
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 
+#include <linux/debugfs.h>
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
 #include <linux/kthread.h>
@@ -19,18 +20,33 @@
 #include "sde_core_irq.h"
 #include "sde_power_handle.h"
 
+/**
+ * sde_core_irq_callback_handler - dispatch core interrupts
+ * @arg:		private data of callback handler
+ * @irq_idx:		interrupt index
+ */
 static void sde_core_irq_callback_handler(void *arg, int irq_idx)
 {
 	struct sde_kms *sde_kms = arg;
 	struct sde_irq *irq_obj = &sde_kms->irq_obj;
+	struct sde_irq_callback *cb;
+	unsigned long irq_flags;
+
+	SDE_DEBUG("irq_idx=%d\n", irq_idx);
+
+	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
+		SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx);
+
+	atomic_inc(&irq_obj->irq_counts[irq_idx]);
 
 	/*
 	 * Perform registered function callback
 	 */
-	if (irq_obj->irq_cb_tbl && irq_obj->irq_cb_tbl[irq_idx].func)
-		irq_obj->irq_cb_tbl[irq_idx].func(
-				irq_obj->irq_cb_tbl[irq_idx].arg,
-				irq_idx);
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+		if (cb->func)
+			cb->func(cb->arg, irq_idx);
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
 
 	/*
 	 * Clear pending interrupt status in HW.
@@ -53,26 +69,107 @@
 			instance_idx);
 }
 
+/**
+ * _sde_core_irq_enable - enable core interrupt given by the index
+ * @sde_kms:		Pointer to sde kms context
+ * @irq_idx:		interrupt index
+ */
+static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0;
+
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->irq_obj.enable_counts ||
+			!sde_kms->irq_obj.irq_counts) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+	if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+		ret = sde_kms->hw_intr->ops.enable_irq(
+				sde_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+					irq_idx);
+
+		SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+		/* empty callback list but interrupt is enabled */
+		if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
+			SDE_ERROR("irq_idx=%d enabled with no callback\n",
+					irq_idx);
+	}
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return ret;
+}
+
 int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
 {
 	int i;
 	int ret = 0;
 
-	if (!sde_kms || !irq_idxs || !sde_kms->hw_intr ||
-			!sde_kms->hw_intr->ops.enable_irq)
+	if (!sde_kms || !irq_idxs || !irq_count) {
+		SDE_ERROR("invalid params\n");
 		return -EINVAL;
-
-	for (i = 0; i < irq_count; i++) {
-		ret = sde_kms->hw_intr->ops.enable_irq(
-				sde_kms->hw_intr,
-				irq_idxs[i]);
-		if (ret) {
-			SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n",
-					irq_idxs[i]);
-			return ret;
-		}
 	}
 
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+/**
+ * _sde_core_irq_disable - disable core interrupt given by the index
+ * @sde_kms:		Pointer to sde kms context
+ * @irq_idx:		interrupt index
+ */
+static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0;
+
+	if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+	if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+		ret = sde_kms->hw_intr->ops.disable_irq(
+				sde_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+					irq_idx);
+		SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+	}
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
 	return ret;
 }
 
@@ -81,21 +178,14 @@
 	int i;
 	int ret = 0;
 
-	if (!sde_kms || !irq_idxs || !sde_kms->hw_intr ||
-			!sde_kms->hw_intr->ops.disable_irq)
+	if (!sde_kms || !irq_idxs || !irq_count) {
+		SDE_ERROR("invalid params\n");
 		return -EINVAL;
-
-	for (i = 0; i < irq_count; i++) {
-		ret = sde_kms->hw_intr->ops.disable_irq(
-				sde_kms->hw_intr,
-				irq_idxs[i]);
-		if (ret) {
-			SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n",
-					irq_idxs[i]);
-			return ret;
-		}
 	}
 
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
+
 	return ret;
 }
 
@@ -112,26 +202,56 @@
 int sde_core_irq_register_callback(struct sde_kms *sde_kms, int irq_idx,
 		struct sde_irq_callback *register_irq_cb)
 {
-	struct sde_irq_callback *irq_cb_tbl;
 	unsigned long irq_flags;
 
-	/*
-	 * We allow NULL register_irq_cb as input for callback registration
-	 */
-	if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl)
+	if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
+			!sde_kms->irq_obj.irq_cb_tbl) {
+		SDE_ERROR("invalid params\n");
 		return -EINVAL;
+	}
 
 	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
 		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
 
-	irq_cb_tbl = sde_kms->irq_obj.irq_cb_tbl;
+	SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
 	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
-	irq_cb_tbl[irq_idx].func = register_irq_cb ?
-		register_irq_cb->func : NULL;
-	irq_cb_tbl[irq_idx].arg  = register_irq_cb ?
-		register_irq_cb->arg : NULL;
+	SDE_EVT32(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	list_add_tail(&register_irq_cb->list,
+			&sde_kms->irq_obj.irq_cb_tbl[irq_idx]);
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+int sde_core_irq_unregister_callback(struct sde_kms *sde_kms, int irq_idx,
+		struct sde_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
+			!sde_kms->irq_obj.irq_cb_tbl) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	/* empty callback list but interrupt is still enabled */
+	if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]))
+		SDE_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
 	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
 
 	return 0;
@@ -155,9 +275,83 @@
 	sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
 }
 
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int sde_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+	struct sde_irq *irq_obj = s->private;
+	struct sde_irq_callback *cb;
+	unsigned long irq_flags;
+	int i, irq_count, enable_count, cb_count;
+
+	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	for (i = 0; i < irq_obj->total_irqs; i++) {
+		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+		cb_count = 0;
+		irq_count = atomic_read(&irq_obj->irq_counts[i]);
+		enable_count = atomic_read(&irq_obj->enable_counts[i]);
+		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+			cb_count++;
+		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+		if (irq_count || enable_count || cb_count)
+			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+					i, irq_count, enable_count, cb_count);
+	}
+
+	return 0;
+}
+
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_core_irq);
+
+static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0644,
+			parent, &sde_kms->irq_obj,
+			&sde_debugfs_core_irq_fops);
+
+	return 0;
+}
+
+static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove(sde_kms->irq_obj.debugfs_file);
+	sde_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+
+static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
+
 void sde_core_irq_preinstall(struct sde_kms *sde_kms)
 {
 	struct msm_drm_private *priv;
+	int i;
 
 	if (!sde_kms) {
 		SDE_ERROR("invalid sde_kms\n");
@@ -181,7 +375,18 @@
 	/* Create irq callbacks for all possible irq_idx */
 	sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->irq_idx_tbl_size;
 	sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
-			sizeof(struct sde_irq_callback), GFP_KERNEL);
+			sizeof(struct list_head), GFP_KERNEL);
+	sde_kms->irq_obj.enable_counts = kcalloc(sde_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	sde_kms->irq_obj.irq_counts = kcalloc(sde_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	for (i = 0; i < sde_kms->irq_obj.total_irqs; i++) {
+		INIT_LIST_HEAD(&sde_kms->irq_obj.irq_cb_tbl[i]);
+		atomic_set(&sde_kms->irq_obj.enable_counts[i], 0);
+		atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
+	}
+
+	sde_debugfs_core_irq_init(sde_kms, sde_kms->debugfs_root);
 }
 
 int sde_core_irq_postinstall(struct sde_kms *sde_kms)
@@ -192,6 +397,7 @@
 void sde_core_irq_uninstall(struct sde_kms *sde_kms)
 {
 	struct msm_drm_private *priv;
+	int i;
 
 	if (!sde_kms) {
 		SDE_ERROR("invalid sde_kms\n");
@@ -205,13 +411,25 @@
 	}
 	priv = sde_kms->dev->dev_private;
 
+	sde_debugfs_core_irq_destroy(sde_kms);
+
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+	for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
+		if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
+				!list_empty(&sde_kms->irq_obj.irq_cb_tbl[i]))
+			SDE_ERROR("irq_idx=%d still enabled/registered\n", i);
+
 	sde_clear_all_irqs(sde_kms);
 	sde_disable_all_irqs(sde_kms);
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
 	kfree(sde_kms->irq_obj.irq_cb_tbl);
+	kfree(sde_kms->irq_obj.enable_counts);
+	kfree(sde_kms->irq_obj.irq_counts);
 	sde_kms->irq_obj.irq_cb_tbl = NULL;
+	sde_kms->irq_obj.enable_counts = NULL;
+	sde_kms->irq_obj.irq_counts = NULL;
+	sde_kms->irq_obj.total_irqs = 0;
 }
 
 irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index 5b5bdf1..92642e7 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -63,6 +63,9 @@
  * @irq_idxs:		Array of irq index
  * @irq_count:		Number of irq_idx provided in the array
  * @return:		0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments the enable reference count on each call. The
+ * interrupt is enabled in hardware when the count transitions from 0 to 1.
  */
 int sde_core_irq_enable(
 		struct sde_kms *sde_kms,
@@ -75,6 +78,9 @@
  * @irq_idxs:		Array of irq index
  * @irq_count:		Number of irq_idx provided in the array
  * @return:		0 for success disabling IRQ, otherwise failure
+ *
+ * This function decrements the enable reference count on each call. The
+ * interrupt is disabled in hardware when the count drops back to 0.
  */
 int sde_core_irq_disable(
 		struct sde_kms *sde_kms,
@@ -101,11 +107,32 @@
  * @irq_cb:		IRQ callback structure, containing callback function
  *			and argument. Passing NULL for irq_cb will unregister
  *			the callback for the given irq_idx
+ *			The structure must remain valid until it is unregistered.
  * @return:		0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
  */
 int sde_core_irq_register_callback(
 		struct sde_kms *sde_kms,
 		int irq_idx,
 		struct sde_irq_callback *irq_cb);
 
+/**
+ * sde_core_irq_unregister_callback - unregister a callback function for an IRQ
+ * @sde_kms:		SDE handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure, containing the callback function
+ *			and argument. This must be the same structure that was
+ *			passed to sde_core_irq_register_callback.
+ * @return:		0 for success unregistering callback, otherwise failure
+ *
+ * Multiple callbacks may be registered per interrupt; this call removes only
+ * the given callback.
+ */
+int sde_core_irq_unregister_callback(
+		struct sde_kms *sde_kms,
+		int irq_idx,
+		struct sde_irq_callback *irq_cb);
+
 #endif /* __SDE_CORE_IRQ_H__ */
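
With the list-based callback table and reference-counted enables introduced
in sde_core_irq.c, a typical client embeds a struct sde_irq_callback,
registers it, and balances every enable against a later disable. A hedged
usage sketch (the client structure and helpers are illustrative, not from
this patch):

	struct example_client {
		struct sde_irq_callback irq_cb;	/* must outlive registration */
		int irq_idx;
	};

	static void example_irq_handler(void *arg, int irq_idx)
	{
		/* runs from sde_core_irq_callback_handler in IRQ context */
	}

	static int example_enable(struct sde_kms *kms, struct example_client *c)
	{
		/* initialise the list head before first registration (assumption) */
		INIT_LIST_HEAD(&c->irq_cb.list);
		c->irq_cb.func = example_irq_handler;
		c->irq_cb.arg = c;
		sde_core_irq_register_callback(kms, c->irq_idx, &c->irq_cb);
		return sde_core_irq_enable(kms, &c->irq_idx, 1);
	}

	static void example_disable(struct sde_kms *kms, struct example_client *c)
	{
		sde_core_irq_disable(kms, &c->irq_idx, 1);
		sde_core_irq_unregister_callback(kms, c->irq_idx, &c->irq_cb);
	}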
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index b38c2df..27374ec 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -24,7 +24,10 @@
 #include "sde_hw_lm.h"
 #include "sde_hw_ctl.h"
 #include "sde_crtc.h"
+#include "sde_plane.h"
 #include "sde_color_processing.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
 
 /* default input fence timeout, in ms */
 #define SDE_CRTC_INPUT_FENCE_TIMEOUT    2000
@@ -266,6 +269,9 @@
 		}
 		mixer[i].mixer_op_mode = 0;
 		mixer[i].flush_mask = 0;
+		if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+			mixer[i].hw_ctl->ops.clear_all_blendstages(
+					mixer[i].hw_ctl);
 	}
 
 	/* initialize stage cfg */
@@ -285,30 +291,60 @@
 		/* stage config flush mask */
 		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
 
-		SDE_DEBUG("lm %d ctl %d add mask 0x%x to pending flush\n",
-			mixer[i].hw_lm->idx, ctl->idx, mixer[i].flush_mask);
+		SDE_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+			mixer[i].hw_lm->idx - LM_0,
+			mixer[i].mixer_op_mode,
+			ctl->idx - CTL_0,
+			mixer[i].flush_mask);
 
 		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
 			&sde_crtc->stage_cfg, i);
 	}
 }
 
-void sde_crtc_prepare_fence(struct drm_crtc *crtc)
+void sde_crtc_prepare_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
 {
 	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct drm_connector *conn;
 
-	if (!crtc) {
+	if (!crtc || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 
 	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	SDE_EVT32(DRMID(crtc));
 
-	MSM_EVT(crtc->dev, crtc->base.id, 0);
+	/* identify connectors attached to this crtc */
+	cstate->is_rt = false;
+	cstate->num_connectors = 0;
 
+	drm_for_each_connector(conn, crtc->dev)
+		if (conn->state && conn->state->crtc == crtc &&
+				cstate->num_connectors < MAX_CONNECTORS) {
+			cstate->connectors[cstate->num_connectors++] = conn;
+			sde_connector_prepare_fence(conn);
+
+			if (conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+				cstate->is_rt = true;
+		}
+
+	/* prepare main output fence */
 	sde_fence_prepare(&sde_crtc->output_fence);
 }
 
+bool sde_crtc_is_rt(struct drm_crtc *crtc)
+{
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc or state\n");
+		return true;
+	}
+	return to_sde_crtc_state(crtc->state)->is_rt;
+}
+
 /* if file!=NULL, this is preclose potential cancel-flip path */
 static void _sde_crtc_complete_flip(struct drm_crtc *crtc,
 		struct drm_file *file)
@@ -329,7 +365,7 @@
 			sde_crtc->event = NULL;
 			DRM_DEBUG_VBL("%s: send event: %pK\n",
 						sde_crtc->name, event);
-			MSM_EVT(crtc->dev, crtc->base.id, 0);
+			SDE_EVT32(DRMID(crtc));
 			drm_crtc_send_vblank_event(crtc, event);
 		}
 	}
@@ -339,21 +375,40 @@
 static void sde_crtc_vblank_cb(void *data)
 {
 	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	/* keep statistics on vblank callback - with auto reset via debugfs */
+	if (ktime_equal(sde_crtc->vblank_cb_time, ktime_set(0, 0)))
+		sde_crtc->vblank_cb_time = ktime_get();
+	else
+		sde_crtc->vblank_cb_count++;
 
 	drm_crtc_handle_vblank(crtc);
 	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
-	MSM_EVT(crtc->dev, crtc->base.id, 0);
+	SDE_EVT32_IRQ(DRMID(crtc));
 }
 
-void sde_crtc_complete_commit(struct drm_crtc *crtc)
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
 {
-	if (!crtc) {
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	int i;
+
+	if (!crtc || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 
-	/* signal out fence at end of commit */
-	sde_fence_signal(&to_sde_crtc(crtc)->output_fence, 0);
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	SDE_EVT32(DRMID(crtc));
+
+	/* signal output fence(s) at end of commit */
+	sde_fence_signal(&sde_crtc->output_fence, 0);
+
+	for (i = 0; i < cstate->num_connectors; ++i)
+		sde_connector_complete_commit(cstate->connectors[i]);
 }
 
 /**
@@ -441,7 +496,7 @@
 		/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
 		if (!sde_rm_get_hw(rm, &ctl_iter)) {
 			SDE_DEBUG("no ctl assigned to lm %d, using previous\n",
-					mixer->hw_lm->idx);
+					mixer->hw_lm->idx - LM_0);
 			mixer->hw_ctl = last_valid_ctl;
 		} else {
 			mixer->hw_ctl = (struct sde_hw_ctl *)ctl_iter.hw;
@@ -451,7 +506,7 @@
 		/* Shouldn't happen, mixers are always >= ctls */
 		if (!mixer->hw_ctl) {
 			SDE_ERROR("no valid ctls found for lm %d\n",
-					mixer->hw_lm->idx);
+					mixer->hw_lm->idx - LM_0);
 			return;
 		}
 
@@ -489,20 +544,26 @@
 }
 
 static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_crtc_state)
+		struct drm_crtc_state *old_state)
 {
 	struct sde_crtc *sde_crtc;
 	struct drm_device *dev;
 	unsigned long flags;
 	u32 i;
 
-	SDE_DEBUG("\n");
-
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 
+	if (!crtc->state->enable) {
+		SDE_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+				crtc->base.id, crtc->state->enable);
+		return;
+	}
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
 	sde_crtc = to_sde_crtc(crtc);
 	dev = crtc->dev;
 
@@ -558,7 +619,13 @@
 		return;
 	}
 
-	SDE_DEBUG("\n");
+	if (!crtc->state->enable) {
+		SDE_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+				crtc->base.id, crtc->state->enable);
+		return;
+	}
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
 	sde_crtc = to_sde_crtc(crtc);
 
@@ -613,7 +680,7 @@
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(state);
 
-	SDE_DEBUG("\n");
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
 	__drm_atomic_helper_crtc_destroy_state(crtc, state);
 
@@ -721,6 +788,7 @@
 static void sde_crtc_disable(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
+	struct drm_encoder *encoder;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -728,9 +796,22 @@
 	}
 	sde_crtc = to_sde_crtc(crtc);
 
-	SDE_DEBUG("\n");
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
 	mutex_lock(&sde_crtc->crtc_lock);
+	if (atomic_read(&sde_crtc->vblank_refcount)) {
+		SDE_ERROR("crtc%d invalid vblank refcount %d\n",
+				crtc->base.id,
+				atomic_read(&sde_crtc->vblank_refcount));
+		drm_for_each_encoder(encoder, crtc->dev) {
+			if (encoder->crtc != crtc)
+				continue;
+			sde_encoder_register_vblank_callback(encoder, NULL,
+						NULL);
+		}
+		atomic_set(&sde_crtc->vblank_refcount, 0);
+	}
+
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
 	mutex_unlock(&sde_crtc->crtc_lock);
@@ -750,7 +831,7 @@
 		return;
 	}
 
-	SDE_DEBUG("\n");
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
 	sde_crtc = to_sde_crtc(crtc);
 	mixer = sde_crtc->mixers;
@@ -877,7 +958,7 @@
 		z_pos = pstates[i].stage;
 
 		/* verify z_pos setting before using it */
-		if (z_pos >= SDE_STAGE_MAX) {
+		if (z_pos >= SDE_STAGE_MAX - SDE_STAGE_0) {
 			SDE_ERROR("> %d plane stages assigned\n",
 					SDE_STAGE_MAX - SDE_STAGE_0);
 			rc = -EINVAL;
@@ -909,16 +990,30 @@
 
 int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
 {
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 	struct drm_encoder *encoder;
 	struct drm_device *dev = crtc->dev;
 
-	SDE_DEBUG("%d", en);
+	if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
+		SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
+	} else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
+		SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
+		return -EINVAL;
+	} else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
+		SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
+	} else {
+		SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
+				crtc->base.id,
+				en ? "enable" : "disable",
+				atomic_read(&sde_crtc->vblank_refcount));
+		return 0;
+	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc != crtc)
 			continue;
 
-		MSM_EVT(crtc->dev, crtc->base.id, en);
+		SDE_EVT32(DRMID(crtc), en);
 
 		if (en)
 			sde_encoder_register_vblank_callback(encoder,
@@ -935,7 +1030,7 @@
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 
-	SDE_DEBUG("%s: cancel: %p", sde_crtc->name, file);
+	SDE_DEBUG("%s: cancel: %p\n", sde_crtc->name, file);
 	_sde_crtc_complete_flip(crtc, file);
 }
 
@@ -1186,6 +1281,27 @@
 			state->crtc_h);
 		seq_puts(s, "\n");
 	}
+
+	if (sde_crtc->vblank_cb_count) {
+		ktime_t diff = ktime_sub(ktime_get(), sde_crtc->vblank_cb_time);
+		s64 diff_ms = ktime_to_ms(diff);
+		s64 fps = diff_ms ? DIV_ROUND_CLOSEST(
+				sde_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+		seq_printf(s,
+			"vblank fps:%lld count:%u total:%llums\n",
+				fps,
+				sde_crtc->vblank_cb_count,
+				ktime_to_ms(diff));
+
+		/* reset time & count for next measurement */
+		sde_crtc->vblank_cb_count = 0;
+		sde_crtc->vblank_cb_time = ktime_set(0, 0);
+	}
+
+	seq_printf(s, "vblank_refcount:%d\n",
+			atomic_read(&sde_crtc->vblank_refcount));
+
 	mutex_unlock(&sde_crtc->crtc_lock);
 
 	return 0;
@@ -1267,6 +1383,7 @@
 
 	crtc = &sde_crtc->base;
 	crtc->dev = dev;
+	atomic_set(&sde_crtc->vblank_refcount, 0);
 
 	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs);
 
@@ -1277,8 +1394,8 @@
 	snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
 
 	/* initialize output fence support */
-	sde_fence_init(dev, &sde_crtc->output_fence, sde_crtc->name);
 	mutex_init(&sde_crtc->crtc_lock);
+	sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);
 
 	/* initialize debugfs support */
 	_sde_crtc_init_debugfs(sde_crtc, kms);
@@ -1290,7 +1407,10 @@
 			sizeof(struct sde_crtc_state));
 
 	sde_crtc_install_properties(crtc, kms->catalog);
+
+	/* Install color processing properties */
 	sde_cp_crtc_init(crtc);
+	sde_cp_crtc_install_properties(crtc);
 
 	SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
 	return crtc;
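
The crtc status debugfs output added above reports an average vblank rate,
which is simply the callback count scaled by the elapsed milliseconds since
the last reset. A worked example with assumed numbers (not taken from any
real log):

	/*
	 * fps = DIV_ROUND_CLOSEST(vblank_cb_count * 1000, diff_ms)
	 * e.g. 300 vblank callbacks over 5000 ms of uptime:
	 *      DIV_ROUND_CLOSEST(300 * 1000, 5000) = 60 fps
	 */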
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 40dff398..65c073e 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -54,6 +54,9 @@
  * @property_defaults : Array of default values for generic property support
  * @stage_cfg     : H/w mixer stage configuration
  * @debugfs_root  : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @vblank_cb_time  : ktime at vblank count reset
+ * @vblank_refcount : reference count for vblank enable request
  * @feature_list  : list of color processing features supported on a crtc
  * @active_list   : list of color processing features are active
  * @dirty_list    : list of color processing features are dirty
@@ -81,6 +84,10 @@
 	struct sde_hw_stage_cfg stage_cfg;
 	struct dentry *debugfs_root;
 
+	u32 vblank_cb_count;
+	ktime_t vblank_cb_time;
+	atomic_t vblank_refcount;
+
 	struct list_head feature_list;
 	struct list_head active_list;
 	struct list_head dirty_list;
@@ -93,12 +100,20 @@
 /**
  * struct sde_crtc_state - sde container for atomic crtc state
  * @base: Base drm crtc state structure
+ * @connectors    : Currently associated drm connectors
+ * @num_connectors: Number of associated drm connectors
+ * @is_rt         : Whether or not the current commit contains RT connectors
  * @property_values: Current crtc property values
  * @input_fence_timeout_ns : Cached input fence timeout, in ns
  * @property_blobs: Reference pointers for blob properties
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
+
+	struct drm_connector *connectors[MAX_CONNECTORS];
+	int num_connectors;
+	bool is_rt;
+
 	uint64_t property_values[CRTC_PROP_COUNT];
 	uint64_t input_fence_timeout_ns;
 	struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
@@ -153,10 +168,20 @@
 void sde_crtc_commit_kickoff(struct drm_crtc *crtc);
 
 /**
- * sde_crtc_prepare_fence - callback to prepare for output fences
+ * sde_crtc_prepare_commit - callback to prepare for output fences
  * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
  */
-void sde_crtc_prepare_fence(struct drm_crtc *crtc);
+void sde_crtc_prepare_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state);
+
+/**
+ * sde_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state);
 
 /**
  * sde_crtc_init - create a new crtc object
@@ -167,16 +192,17 @@
 struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane);
 
 /**
- * sde_crtc_complete_commit - callback signalling completion of current commit
- * @crtc: Pointer to drm crtc object
- */
-void sde_crtc_complete_commit(struct drm_crtc *crtc);
-
-/**
  * sde_crtc_cancel_pending_flip - complete flip for clients on lastclose
  * @crtc: Pointer to drm crtc object
  * @file: client to cancel's file handle
  */
 void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 
+/**
+ * sde_crtc_is_rt - query whether real time connectors are present on the crtc
+ * @crtc: Pointer to drm crtc structure
+ * Returns: True if a connector is present with real time constraints
+ */
+bool sde_crtc_is_rt(struct drm_crtc *crtc);
+
 #endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index fd57404..af5f81d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -25,7 +25,6 @@
 #include "sde_hw_ctl.h"
 #include "sde_formats.h"
 #include "sde_encoder_phys.h"
-#include "display_manager.h"
 #include "sde_color_processing.h"
 
 #define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
@@ -44,8 +43,7 @@
 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
 	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
 
-/* Wait timeout sized on worst case of 4 60fps frames ~= 67ms */
-#define WAIT_TIMEOUT_MSEC 67
+#define MAX_CHANNELS_PER_ENC 2
 
 /**
  * struct sde_encoder_virt - virtual encoder. Container of one or more physical
@@ -54,35 +52,27 @@
  *	Virtual encoder defers as much as possible to the physical encoders.
  *	Virtual encoder registers itself with the DRM Framework as the encoder.
  * @base:		drm_encoder base class for registration with DRM
- * @spin_lock:		Lock for IRQ purposes
+ * @enc_spin_lock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @bus_scaling_client:	Client handle to the bus scaling interface
  * @num_phys_encs:	Actual number of physical encoders contained.
  * @phys_encs:		Container of physical encoders managed.
  * @cur_master:		Pointer to the current master in this mode. Optimization
  *			Only valid after enable. Cleared as disable.
+ * @hw_pp:		Handles to the pingpong blocks used for the display. The
+ *			number of pingpong blocks may differ from num_phys_encs.
  * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
  *			notification of the VBLANK
  * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
- * @pending_kickoff_mask:	Bitmask used to track which physical encoders
- *				still have pending transmissions before we can
- *				trigger the next kickoff. Bitmask tracks the
- *				index of the phys_enc table. Protect since
- *				shared between irq and commit thread
  * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
  *				all CTL paths
  * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
- * @pending_kickoff_mask:	Bitmask tracking which phys_enc we are still
- *				waiting on before we can trigger the next
- *				kickoff. Bit0 = phys_encs[0] etc.
- * @pending_kickoff_wq:		Wait queue commit thread to wait on phys_encs
- *				become ready for kickoff in IRQ contexts
  * @debugfs_root:		Debug file system root file node
  * @enc_lock:			Lock around physical encoder create/destroy and
 				access.
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
-	spinlock_t spin_lock;
+	spinlock_t enc_spinlock;
 	uint32_t bus_scaling_client;
 
 	uint32_t display_num_of_h_tiles;
@@ -90,13 +80,11 @@
 	unsigned int num_phys_encs;
 	struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 	struct sde_encoder_phys *cur_master;
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 
 	void (*crtc_vblank_cb)(void *);
 	void *crtc_vblank_cb_data;
 
-	unsigned int pending_kickoff_mask;
-	wait_queue_head_t pending_kickoff_wq;
-
 	struct dentry *debugfs_root;
 	struct mutex enc_lock;
 };
@@ -218,7 +206,7 @@
 	}
 }
 
-static void sde_encoder_destroy(struct drm_encoder *drm_enc)
+void sde_encoder_destroy(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
 	int i = 0;
@@ -256,6 +244,57 @@
 	kfree(sde_enc);
 }
 
+void sde_encoder_helper_split_config(
+		struct sde_encoder_phys *phys_enc,
+		enum sde_intf interface)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct split_pipe_cfg cfg = { 0 };
+	struct sde_hw_mdp *hw_mdptop;
+	enum sde_rm_topology_name topology;
+
+	if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(phys_enc->parent);
+	hw_mdptop = phys_enc->hw_mdptop;
+	cfg.en = phys_enc->split_role != ENC_ROLE_SOLO;
+	cfg.mode = phys_enc->intf_mode;
+	cfg.intf = interface;
+
+	if (cfg.en && phys_enc->ops.needs_single_flush &&
+			phys_enc->ops.needs_single_flush(phys_enc))
+		cfg.split_flush_en = true;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
+		cfg.pp_split_slave = cfg.intf;
+	else
+		cfg.pp_split_slave = INTF_MAX;
+
+	if (phys_enc->split_role != ENC_ROLE_SLAVE) {
+		/* master/solo encoder */
+		SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg.en);
+
+		if (hw_mdptop->ops.setup_split_pipe)
+			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+	} else {
+		/*
+		 * slave encoder
+		 * - determine split index from master index,
+		 *   assume master is first pp
+		 */
+		cfg.pp_split_index = sde_enc->hw_pp[0]->idx - PINGPONG_0;
+		SDE_DEBUG_ENC(sde_enc, "master using pp%d\n",
+				cfg.pp_split_index);
+
+		if (hw_mdptop->ops.setup_pp_split)
+			hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
+	}
+}
+
 static int sde_encoder_virt_atomic_check(
 		struct drm_encoder *drm_enc,
 		struct drm_crtc_state *crtc_state,
@@ -282,7 +321,7 @@
 	sde_kms = to_sde_kms(priv->kms);
 	mode = &crtc_state->mode;
 	adj_mode = &crtc_state->adjusted_mode;
-	MSM_EVT(drm_enc->dev, 0, 0);
+	SDE_EVT32(DRMID(drm_enc));
 
 	/* perform atomic check on the first physical encoder (master) */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -307,13 +346,10 @@
 		ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
 				conn_state, true);
 
-	if (!ret) {
-		sde_cp_crtc_install_properties(drm_enc->crtc);
-		/* populate mode->crtc* information required by framework */
+	if (!ret)
 		drm_mode_set_crtcinfo(adj_mode, 0);
-	}
 
-	MSM_EVT(drm_enc->dev, adj_mode->flags, adj_mode->private_flags);
+	SDE_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags);
 
 	return ret;
 }
@@ -327,6 +363,7 @@
 	struct sde_kms *sde_kms;
 	struct list_head *connector_list;
 	struct drm_connector *conn = NULL, *conn_iter;
+	struct sde_rm_hw_iter pp_iter;
 	int i = 0, ret;
 
 	if (!drm_enc) {
@@ -341,7 +378,7 @@
 	sde_kms = to_sde_kms(priv->kms);
 	connector_list = &sde_kms->dev->mode_config.connector_list;
 
-	MSM_EVT(drm_enc->dev, 0, 0);
+	SDE_EVT32(DRMID(drm_enc));
 
 	list_for_each_entry(conn_iter, connector_list, head)
 		if (conn_iter->encoder == drm_enc)
@@ -364,10 +401,24 @@
 		return;
 	}
 
+	sde_rm_init_hw_iter(&pp_iter, drm_enc->base.id, SDE_HW_BLK_PINGPONG);
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		sde_enc->hw_pp[i] = NULL;
+		if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
+			break;
+		sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
+	}
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
 		if (phys) {
+			if (!sde_enc->hw_pp[i]) {
+				SDE_ERROR_ENC(sde_enc,
+				    "invalid pingpong block for the encoder\n");
+				return;
+			}
+			phys->hw_pp = sde_enc->hw_pp[i];
 			phys->connector = conn->state->connector;
 			if (phys->ops.mode_set)
 				phys->ops.mode_set(phys, mode, adj_mode);
@@ -398,13 +449,13 @@
 	sde_kms = to_sde_kms(priv->kms);
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
-
-	MSM_EVT(drm_enc->dev, 0, 0);
+	SDE_EVT32(DRMID(drm_enc));
 
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
 
 	bs_set(sde_enc, 1);
 
+	sde_enc->cur_master = NULL;
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
@@ -412,21 +463,20 @@
 			atomic_set(&phys->vsync_cnt, 0);
 			atomic_set(&phys->underrun_cnt, 0);
 
-			if (phys->ops.enable)
-				phys->ops.enable(phys);
-
-			/*
-			 * Master can switch at enable time.
-			 * It is based on the current mode (CMD/VID) and
-			 * the encoder role found at panel probe time
-			 */
 			if (phys->ops.is_master && phys->ops.is_master(phys)) {
 				SDE_DEBUG_ENC(sde_enc,
 						"master is now idx %d\n", i);
 				sde_enc->cur_master = phys;
+			} else if (phys->ops.enable) {
+				phys->ops.enable(phys);
 			}
 		}
 	}
+
+	if (!sde_enc->cur_master)
+		SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
+	else if (sde_enc->cur_master->ops.enable)
+		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
 }
 
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -453,7 +503,7 @@
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
 
-	MSM_EVT(drm_enc->dev, 0, 0);
+	SDE_EVT32(DRMID(drm_enc));
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -472,8 +522,6 @@
 	SDE_DEBUG_ENC(sde_enc, "cleared master\n");
 
 	bs_set(sde_enc, 0);
-	sde_cp_crtc_destroy_properties(drm_enc->crtc);
-
 	sde_rm_release(&sde_kms->rm, drm_enc);
 
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
@@ -525,10 +573,10 @@
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
-	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
 	if (sde_enc->crtc_vblank_cb)
 		sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data);
-	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 
 	atomic_inc(&phy_enc->vsync_cnt);
 }
@@ -557,12 +605,12 @@
 		return;
 	}
 	SDE_DEBUG_ENC(sde_enc, "\n");
-	MSM_EVT(drm_enc->dev, enable, 0);
+	SDE_EVT32(DRMID(drm_enc), enable);
 
-	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
 	sde_enc->crtc_vblank_cb = vbl_cb;
 	sde_enc->crtc_vblank_cb_data = vbl_data;
-	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -572,29 +620,6 @@
 	}
 }
 
-static void sde_encoder_handle_phys_enc_ready_for_kickoff(
-		struct drm_encoder *drm_enc,
-		struct sde_encoder_phys *ready_phys)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	unsigned long lock_flags;
-	unsigned int i, mask;
-
-	/* One of the physical encoders has become ready for kickoff */
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		if (sde_enc->phys_encs[i] == ready_phys) {
-			spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
-			sde_enc->pending_kickoff_mask &= ~(1 << i);
-			mask = sde_enc->pending_kickoff_mask;
-			spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
-			MSM_EVT(drm_enc->dev, i, mask);
-		}
-	}
-
-	/* Wake the commit thread to check if they all ready for kickoff */
-	wake_up_all(&sde_enc->pending_kickoff_wq);
-}
-
 /**
  * _sde_encoder_trigger_flush - trigger flush for a physical encoder
  * drm_enc: Pointer to drm encoder structure
@@ -605,6 +630,7 @@
 		struct sde_encoder_phys *phys, uint32_t extra_flush_bits)
 {
 	struct sde_hw_ctl *ctl;
+	int pending_kickoff_cnt;
 
 	if (!drm_enc || !phys) {
 		SDE_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
@@ -618,11 +644,15 @@
 		return;
 	}
 
+	pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
+	SDE_EVT32(DRMID(&to_sde_encoder_virt(drm_enc)->base),
+			phys->intf_idx, pending_kickoff_cnt);
+
 	if (extra_flush_bits && ctl->ops.update_pending_flush)
 		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
 
 	ctl->ops.trigger_flush(ctl);
-	MSM_EVT(drm_enc->dev, drm_enc->base.id, ctl->idx);
+	SDE_EVT32(DRMID(drm_enc), ctl->idx);
 }
 
 /**
@@ -657,9 +687,31 @@
 	}
 
 	if (phys_enc && phys_enc->parent)
-		MSM_EVT(phys_enc->parent->dev,
-				phys_enc->parent->base.id,
-				ctl_idx);
+		SDE_EVT32(DRMID(phys_enc->parent), ctl_idx);
+}
+
+int sde_encoder_helper_wait_event_timeout(
+		int32_t drm_id,
+		int32_t hw_id,
+		wait_queue_head_t *wq,
+		atomic_t *cnt,
+		s64 timeout_ms)
+{
+	int rc = 0;
+	s64 expected_time = ktime_to_ms(ktime_get()) + timeout_ms;
+	s64 wait_jiffies = msecs_to_jiffies(timeout_ms);
+	s64 time;
+
+	do {
+		rc = wait_event_timeout(*wq, atomic_read(cnt) == 0,
+				wait_jiffies);
+		time = ktime_to_ms(ktime_get());
+
+		SDE_EVT32(drm_id, hw_id, rc, time, expected_time,
+				atomic_read(cnt));
+	/* if timed out, count is still set and time remains, wait again */
+	} while (atomic_read(cnt) && (rc == 0) && (time < expected_time));
+
+	return rc;
 }
 
 /**
@@ -675,6 +727,7 @@
 {
 	struct sde_hw_ctl *ctl;
 	uint32_t i, pending_flush;
+	unsigned long lock_flags;
 
 	if (!sde_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -683,16 +736,22 @@
 
 	pending_flush = 0x0;
 
+	/* update pending counts and trigger kickoff ctl flush atomically */
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+
 	/* don't perform flush/start operations for slave encoders */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		ctl = phys->hw_ctl;
-		if (!ctl || phys->enable_state == SDE_ENC_DISABLED)
+		if (!phys || phys->enable_state == SDE_ENC_DISABLED)
 			continue;
 
-		if (!phys->ops.needs_split_flush ||
-				!phys->ops.needs_split_flush(phys))
+		ctl = phys->hw_ctl;
+		if (!ctl)
+			continue;
+
+		if (!phys->ops.needs_single_flush ||
+				!phys->ops.needs_single_flush(phys))
 			_sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
 		else if (ctl->ops.get_pending_flush)
 			pending_flush |= ctl->ops.get_pending_flush(ctl);
@@ -707,16 +766,15 @@
 	}
 
 	_sde_encoder_trigger_start(sde_enc->cur_master);
+
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
 void sde_encoder_schedule_kickoff(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
-	unsigned long lock_flags;
-	bool need_to_wait;
 	unsigned int i;
-	int ret;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -725,52 +783,21 @@
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
-	MSM_EVT(drm_enc->dev, 0, 0);
+	SDE_EVT32(DRMID(drm_enc));
 
-	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
-	sde_enc->pending_kickoff_mask = 0;
-	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
-
+	/* prepare for next kickoff, may include waiting on previous kickoff */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		need_to_wait = false;
 		phys = sde_enc->phys_encs[i];
-
 		if (phys && phys->ops.prepare_for_kickoff)
-			phys->ops.prepare_for_kickoff(phys, &need_to_wait);
-
-		if (need_to_wait) {
-			spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
-			sde_enc->pending_kickoff_mask |= 1 << i;
-			spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
-		}
+			phys->ops.prepare_for_kickoff(phys);
 	}
 
-	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
-	MSM_EVT(drm_enc->dev, sde_enc->pending_kickoff_mask, 0);
-	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
-
-	/* Wait for the busy phys encs to be ready */
-	ret = -ERESTARTSYS;
-	while (ret == -ERESTARTSYS) {
-		spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
-		ret = wait_event_interruptible_lock_irq_timeout(
-				sde_enc->pending_kickoff_wq,
-				sde_enc->pending_kickoff_mask == 0,
-				sde_enc->spin_lock,
-				msecs_to_jiffies(WAIT_TIMEOUT_MSEC));
-		spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
-		if (!ret)
-			SDE_DEBUG_ENC(sde_enc, "wait %ums timed out\n",
-					WAIT_TIMEOUT_MSEC);
-	}
-
-	/* All phys encs are ready to go, trigger the kickoff */
+	/* all phys encs are ready to go, trigger the kickoff */
 	_sde_encoder_kickoff_phys(sde_enc);
 
-	/* Allow phys encs to handle any post-kickoff business */
+	/* allow phys encs to handle any post-kickoff business */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
+		phys = sde_enc->phys_encs[i];
 		if (phys && phys->ops.handle_post_kickoff)
 			phys->ops.handle_post_kickoff(phys);
 	}
@@ -827,6 +854,108 @@
 	return single_open(file, _sde_encoder_status_show, inode->i_private);
 }
 
+static void _sde_set_misr_params(struct sde_encoder_phys *phys, u32 enable,
+					u32 frame_count)
+{
+	int j;
+
+	if (!phys->misr_map)
+		return;
+
+	phys->misr_map->enable = enable;
+
+	if (frame_count <= SDE_CRC_BATCH_SIZE)
+		phys->misr_map->frame_count = frame_count;
+	else if (frame_count <= 0)
+		phys->misr_map->frame_count = 0;
+	else
+		phys->misr_map->frame_count = SDE_CRC_BATCH_SIZE;
+
+	if (!enable) {
+		phys->misr_map->last_idx = 0;
+		phys->misr_map->frame_count = 0;
+		for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
+			phys->misr_map->crc_value[j] = 0;
+	}
+}
+
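+/*
+ * Illustrative debugfs usage, based on the "%u %u" format parsed below and
+ * the misr_data node created in _sde_encoder_init_debugfs() (the exact
+ * path depends on the encoder's debugfs root):
+ *
+ *	echo "1 8" > <debugfs>/<encoder>/misr_data	(enable, 8 frames)
+ *	cat <debugfs>/<encoder>/misr_data		(dump collected CRCs)
+ */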
+static ssize_t _sde_encoder_misr_set(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_encoder *drm_enc;
+	int i = 0;
+	char buf[10];
+	u32 enable, frame_count;
+
+	drm_enc = file->private_data;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+		return -EFAULT;
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys || !phys->misr_map || !phys->ops.setup_misr)
+			continue;
+
+		_sde_set_misr_params(phys, enable, frame_count);
+		phys->ops.setup_misr(phys, phys->misr_map);
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+	return count;
+}
+
+static ssize_t _sde_encoder_misr_read(
+		struct file *file,
+		char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_encoder *drm_enc;
+	int i = 0, j = 0, len = 0;
+	char buf[512] = {'\0'};
+
+	if (*ppos)
+		return 0;
+
+	drm_enc = file->private_data;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+		struct sde_misr_params *misr_map;
+
+		if (!phys || !phys->misr_map)
+			continue;
+
+		misr_map = phys->misr_map;
+
+		len += scnprintf(buf + len, sizeof(buf) - len, "INTF%d\n", i);
+		for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
+			len += scnprintf(buf + len, sizeof(buf) - len,
+					"%x\n", misr_map->crc_value[j]);
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
 static void _sde_encoder_init_debugfs(struct drm_encoder *drm_enc,
 	struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms)
 {
@@ -836,6 +965,13 @@
 		.llseek =	seq_lseek,
 		.release =	single_release,
 	};
+
+	static const struct file_operations debugfs_misr_fops = {
+		.open = simple_open,
+		.read = _sde_encoder_misr_read,
+		.write = _sde_encoder_misr_set,
+	};
+
 	char name[SDE_NAME_SIZE];
 
 	if (!drm_enc || !sde_enc || !sde_kms) {
@@ -852,6 +988,10 @@
 		/* don't error check these */
 		debugfs_create_file("status", 0644,
 			sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
+
+		debugfs_create_file("misr_data", 0644,
+			sde_enc->debugfs_root, drm_enc, &debugfs_misr_fops);
+
 	}
 }
 
@@ -947,7 +1087,6 @@
 	struct sde_encoder_virt_ops parent_ops = {
 		sde_encoder_vblank_callback,
 		sde_encoder_underrun_callback,
-		sde_encoder_handle_phys_enc_ready_for_kickoff
 	};
 	struct sde_enc_phys_init_params phys_params;
 
@@ -961,6 +1100,7 @@
 	phys_params.sde_kms = sde_kms;
 	phys_params.parent = &sde_enc->base;
 	phys_params.parent_ops = parent_ops;
+	phys_params.enc_spinlock = &sde_enc->enc_spinlock;
 
 	SDE_DEBUG("\n");
 
@@ -1049,8 +1189,9 @@
 	return ret;
 }
 
-static struct drm_encoder *sde_encoder_virt_init(
-		struct drm_device *dev, struct msm_display_info *disp_info)
+struct drm_encoder *sde_encoder_init(
+		struct drm_device *dev,
+		struct msm_display_info *disp_info)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct sde_kms *sde_kms = to_sde_kms(priv->kms);
@@ -1072,13 +1213,11 @@
 		goto fail;
 
 	sde_enc->cur_master = NULL;
-	spin_lock_init(&sde_enc->spin_lock);
+	spin_lock_init(&sde_enc->enc_spinlock);
 	drm_enc = &sde_enc->base;
 	drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode);
 	drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
 	bs_init(sde_enc);
-	sde_enc->pending_kickoff_mask = 0;
-	init_waitqueue_head(&sde_enc->pending_kickoff_wq);
 
 	_sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms);
 
@@ -1114,69 +1253,12 @@
 			if (ret)
 				return ret;
 		}
+
+		if (phys && phys->ops.collect_misr)
+			if (phys->misr_map && phys->misr_map->enable)
+				phys->ops.collect_misr(phys, phys->misr_map);
 	}
 
 	return ret;
 }
 
-/* encoders init,
- * initialize encoder based on displays
- */
-void sde_encoders_init(struct drm_device *dev)
-{
-	struct msm_drm_private *priv = NULL;
-	struct display_manager *disp_man = NULL;
-	u32 i = 0;
-	u32 num_displays = 0;
-
-	SDE_DEBUG("\n");
-
-	if (!dev || !dev->dev_private) {
-		SDE_ERROR("invalid device %d\n", dev != 0);
-		return;
-	}
-
-	priv = dev->dev_private;
-	priv->num_encoders = 0;
-	if (!priv->kms || !priv->dm) {
-		SDE_ERROR("invalid priv pointer, kms %d dm %d\n",
-				priv->kms != 0, priv->dm != 0);
-		return;
-	}
-	disp_man = priv->dm;
-
-	num_displays = display_manager_get_count(disp_man);
-	SDE_DEBUG("num_displays %d\n", num_displays);
-
-	if (num_displays > ARRAY_SIZE(priv->encoders)) {
-		num_displays = ARRAY_SIZE(priv->encoders);
-		SDE_ERROR("too many displays found, capping to %d\n",
-				num_displays);
-	}
-
-	for (i = 0; i < num_displays; i++) {
-		struct msm_display_info info = { 0 };
-		struct drm_encoder *enc = NULL;
-		u32 ret = 0;
-
-		ret = display_manager_get_info_by_index(disp_man, i, &info);
-		if (ret) {
-			SDE_ERROR("failed to get display info, %d\n", ret);
-			return;
-		}
-
-		enc = sde_encoder_virt_init(dev, &info);
-		if (IS_ERR_OR_NULL(enc)) {
-			SDE_ERROR("encoder initialization failed\n");
-			return;
-		}
-
-		ret = display_manager_drm_init_by_index(disp_man, i, enc);
-		if (ret) {
-			SDE_ERROR("display drm_init failed, %d\n", ret);
-			return;
-		}
-
-		priv->encoders[priv->num_encoders++] = enc;
-	}
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
new file mode 100644
index 0000000..fccc264
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ENCODER_H__
+#define __SDE_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+
+#include "msm_prop.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * struct sde_encoder_hw_resources - hardware resources in use by encoder
+ * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles used by the display
+ */
+struct sde_encoder_hw_resources {
+	enum sde_intf_mode intfs[INTF_MAX];
+	enum sde_intf_mode wbs[WB_MAX];
+	bool needs_cdm;
+	u32 display_num_of_h_tiles;
+};
+
+/**
+ * sde_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder:	encoder pointer
+ * @hw_res:	resource table to populate with encoder required resources
+ * @conn_state:	report hw reqs based on this proposed connector state
+ */
+void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state);
+
+/**
+ * sde_encoder_register_vblank_callback - provide callback to encoder that
+ *	will be called on the next vblank.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister and disable IRQs
+ * @data:	user data provided to callback
+ */
+void sde_encoder_register_vblank_callback(struct drm_encoder *encoder,
+		void (*cb)(void *), void *data);
+
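+/*
+ * Example (illustrative; "crtc_vblank_handler" is a placeholder for the
+ * upper layer's callback): register when vblank events are enabled, and
+ * pass NULL to deregister and disable the interrupts:
+ *
+ *	sde_encoder_register_vblank_callback(encoder, crtc_vblank_handler,
+ *			crtc);
+ *	...
+ *	sde_encoder_register_vblank_callback(encoder, NULL, NULL);
+ */
+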
+/**
+ * sde_encoder_schedule_kickoff - trigger a double buffer flip of the ctl
+ *	path (i.e. ctl flush and start) at the appropriate time.
+ *	Immediately: if no previous commit is outstanding.
+ *	Delayed: wait for the previous commit to complete before triggering,
+ *	e.g. the cmd mode encoder waits for its pp_done irq if one is still
+ *	outstanding.
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_schedule_kickoff(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_wait_for_commit_done - Wait for the hardware to have flushed
+ *	the current pending frames to hardware at a vblank or ctl_start.
+ *	Encoders will map this differently depending on their irqs, e.g.
+ *	vid mode -> vsync_irq.
+ * @drm_encoder:	encoder pointer
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
+
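+/*
+ * Minimal commit-path sketch (illustrative, assuming the caller walks the
+ * encoders attached to the committing crtc):
+ *
+ *	drm_for_each_encoder(encoder, dev) {
+ *		if (encoder->crtc != crtc)
+ *			continue;
+ *		sde_encoder_schedule_kickoff(encoder);
+ *		sde_encoder_wait_for_commit_done(encoder);
+ *	}
+ */
+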
+/**
+ * sde_encoder_init - initialize virtual encoder object
+ * @dev:        Pointer to drm device structure
+ * @disp_info:  Pointer to display information structure
+ * Returns:     Pointer to newly created drm encoder
+ */
+struct drm_encoder *sde_encoder_init(
+		struct drm_device *dev,
+		struct msm_display_info *disp_info);
+
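+/*
+ * Minimal usage sketch (illustrative), assuming @info has been filled in by
+ * the display driver and @priv is the device's struct msm_drm_private:
+ *
+ *	struct drm_encoder *enc;
+ *
+ *	enc = sde_encoder_init(dev, &info);
+ *	if (IS_ERR_OR_NULL(enc))
+ *		return -EINVAL;
+ *	priv->encoders[priv->num_encoders++] = enc;
+ */
+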
+/**
+ * sde_encoder_destroy - destroy previously initialized virtual encoder
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+void sde_encoder_destroy(struct drm_encoder *drm_enc);
+
+#endif /* __SDE_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 22d187e..a52e395 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -15,6 +15,8 @@
 #ifndef __SDE_ENCODER_PHYS_H__
 #define __SDE_ENCODER_PHYS_H__
 
+#include <linux/jiffies.h>
+
 #include "sde_kms.h"
 #include "sde_hw_intf.h"
 #include "sde_hw_pingpong.h"
@@ -22,9 +24,15 @@
 #include "sde_hw_top.h"
 #include "sde_hw_wb.h"
 #include "sde_hw_cdm.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
 
 #define SDE_ENCODER_NAME_MAX	16
 
+/* wait for at most two vsyncs at the lowest refresh rate (24 Hz) */
+#define KICKOFF_TIMEOUT_MS		84
+#define KICKOFF_TIMEOUT_JIFFIES		msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
 /**
  * enum sde_enc_split_role - Role this physical encoder will play in a
  *	split-panel configuration, where one panel is master, and others slaves.
@@ -48,16 +56,12 @@
  *			Note: This is called from IRQ handler context.
  * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
  *			Note: This is called from IRQ handler context.
- * @handle_ready_for_kickoff:	Notify virtual encoder that this phys encoder
- *				is now ready for the next kickoff.
  */
 struct sde_encoder_virt_ops {
 	void (*handle_vblank_virt)(struct drm_encoder *,
 			struct sde_encoder_phys *phys);
 	void (*handle_underrun_virt)(struct drm_encoder *,
 			struct sde_encoder_phys *phys);
-	void (*handle_ready_for_kickoff)(struct drm_encoder *,
-			struct sde_encoder_phys *phys);
 };
 
 /**
@@ -80,12 +84,12 @@
  * @wait_for_commit_done:	Wait for hardware to have flushed the
  *				current pending frames to hardware
  * @prepare_for_kickoff:	Do any work necessary prior to a kickoff
- *				and report whether need to wait before
- *				triggering the next kickoff
- *				(ie for previous tx to complete)
+ *				For CMD encoder, may wait for previous tx done
 * @handle_post_kickoff:	Do any work necessary post-kickoff
  * @trigger_start:		Process start event on physical encoder
- * @needs_split_flush:		Whether encoder type needs split flush
+ * @needs_single_flush:		Whether encoder slaves need to be flushed
+ * @setup_misr:		Sets up MISR, enables or disables it based on the
+ *				debugfs request
+ * @collect_misr:		Collects MISR data on frame update
  */
 
 struct sde_encoder_phys_ops {
@@ -107,11 +111,15 @@
 			struct drm_connector_state *conn_state);
 	int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
 	int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
-	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
-			bool *wait_until_ready);
+	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc);
 	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
 	void (*trigger_start)(struct sde_encoder_phys *phys_enc);
-	bool (*needs_split_flush)(struct sde_encoder_phys *phys_enc);
+	bool (*needs_single_flush)(struct sde_encoder_phys *phys_enc);
+
+	void (*setup_misr)(struct sde_encoder_phys *phys_encs,
+			struct sde_misr_params *misr_map);
+	void (*collect_misr)(struct sde_encoder_phys *phys_enc,
+			struct sde_misr_params *misr_map);
 };
 
 /**
@@ -154,17 +162,24 @@
  * @hw_ctl:		Hardware interface to the ctl registers
  * @hw_cdm:		Hardware interface to the cdm registers
  * @cdm_cfg:		Chroma-down hardware configuration
+ * @hw_pp:		Hardware interface to the ping pong registers
  * @sde_kms:		Pointer to the sde_kms top level
  * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
+ * @misr_map:		Interface for setting and collecting MISR data
  * @enabled:		Whether the encoder has enabled and running a mode
  * @split_role:		Role to play in a split-panel configuration
  * @intf_mode:		Interface mode
  * @intf_idx:		Interface index on sde hardware
- * @spin_lock:		Lock for IRQ purposes
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @enable_state:	Enable state tracking
  * @vblank_refcount:	Reference count of vblank request
  * @vsync_cnt:		Vsync count for the physical encoder
  * @underrun_cnt:	Underrun count for the physical encoder
+ * @pending_kickoff_cnt:	Atomic counter tracking the number of kickoffs
+ *				vs. the number of done/vblank irqs. Should hover
+ *				between 0-2. Incremented when a new kickoff is
+ *				scheduled. Decremented in the irq handler.
+ * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
  */
 struct sde_encoder_phys {
 	struct drm_encoder *parent;
@@ -175,31 +190,40 @@
 	struct sde_hw_ctl *hw_ctl;
 	struct sde_hw_cdm *hw_cdm;
 	struct sde_hw_cdm_cfg cdm_cfg;
+	struct sde_hw_pingpong *hw_pp;
 	struct sde_kms *sde_kms;
 	struct drm_display_mode cached_mode;
+	struct sde_misr_params *misr_map;
 	enum sde_enc_split_role split_role;
 	enum sde_intf_mode intf_mode;
 	enum sde_intf intf_idx;
-	spinlock_t spin_lock;
+	spinlock_t *enc_spinlock;
 	enum sde_enc_enable_state enable_state;
 	atomic_t vblank_refcount;
 	atomic_t vsync_cnt;
 	atomic_t underrun_cnt;
+	atomic_t pending_kickoff_cnt;
+	wait_queue_head_t pending_kickoff_wq;
 };
 
+static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
+{
+	return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
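+/*
+ * Typical pending_kickoff_cnt flow: sde_encoder_phys_inc_pending() is called
+ * when the ctl flush for a kickoff is triggered, the done/vblank irq handler
+ * decrements the counter and wakes pending_kickoff_wq, and
+ * sde_encoder_helper_wait_event_timeout() is used to block until the counter
+ * drops back to zero.
+ */
+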
 /**
  * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
  *	mode specific operations
  * @base:	Baseclass physical encoder structure
  * @irq_idx:	IRQ interface lookup index
+ * @irq_cb:	interrupt callback
  * @hw_intf:	Hardware interface to the intf registers
- * @vblank_completion:	Completion event signaled on reception of the vsync irq
  */
 struct sde_encoder_phys_vid {
 	struct sde_encoder_phys base;
 	int irq_idx[INTR_IDX_MAX];
+	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
 	struct sde_hw_intf *hw_intf;
-	struct completion vblank_completion;
 };
 
 /**
@@ -208,26 +232,17 @@
  * @base:	Baseclass physical encoder structure
  * @intf_idx:	Intf Block index used by this phys encoder
  * @stream_sel:	Stream selection for multi-stream interfaces
- * @hw_pp:	Hardware interface to the ping pong registers
  * @pp_rd_ptr_irq_idx:	IRQ signifying panel's frame read pointer
  *			For CMD encoders, VBLANK is driven by the PP RD Done IRQ
  * @pp_tx_done_irq_idx:	IRQ signifying frame transmission to panel complete
- * @pp_tx_done_wq:	Wait queue that tracks when a commit is flushed
- *			to hardware after the reception of pp_done
- *			Used to prevent back to back commits
- * @pending_cnt:	Atomic counter tracking the number of kickoffs vs.
- *			the number of pp_done irqs. Should hover between 0-2
- *			Incremented when a new kickoff is scheduled
- *			Decremented in pp_done irq
+ * @irq_cb:	interrupt callback
  */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
 	int intf_idx;
 	int stream_sel;
-	struct sde_hw_pingpong *hw_pp;
 	int irq_idx[INTR_IDX_MAX];
-	wait_queue_head_t pp_tx_done_wq;
-	atomic_t pending_cnt;
+	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
 };
 
 /**
@@ -256,6 +271,7 @@
 	struct sde_encoder_phys base;
 	struct sde_hw_wb *hw_wb;
 	int irq_idx;
+	struct sde_irq_callback irq_cb;
 	u32 wbdone_timeout;
 	u32 bypass_irqreg;
 	struct completion wbdone_complete;
@@ -283,6 +299,7 @@
  * @split_role:		Role to play in a split-panel configuration
  * @intf_idx:		Interface index this phys_enc will control
  * @wb_idx:		Writeback index this phys_enc will control
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
  */
 struct sde_enc_phys_init_params {
 	struct sde_kms *sde_kms;
@@ -291,6 +308,7 @@
 	enum sde_enc_split_role split_role;
 	enum sde_intf intf_idx;
 	enum sde_wb wb_idx;
+	spinlock_t *enc_spinlock;
 };
 
 /**
@@ -338,6 +356,24 @@
  */
 void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc);
 
+/**
+ * sde_encoder_helper_wait_event_timeout - wait for event with timeout
+ *	taking into account that jiffies may jump between reads leading to
+ *	incorrectly detected timeouts. Prevent failure in this scenario by
+ *	making sure that elapsed time during wait is valid.
+ * @drm_id: drm object id for logging
+ * @hw_id: hw instance id for logging
+ * @wq: wait queue structure
+ * @cnt: atomic counter to wait on
+ * @timeout_ms: timeout value in milliseconds
+ * Returns: number of remaining jiffies if the wait condition was met,
+ *	zero if the wait timed out
+ */
+int sde_encoder_helper_wait_event_timeout(
+		int32_t drm_id,
+		int32_t hw_id,
+		wait_queue_head_t *wq,
+		atomic_t *cnt,
+		s64 timeout_ms);
+
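+/*
+ * Example (illustrative, mirroring the cmd encoder wait-for-idle path):
+ *
+ *	ret = sde_encoder_helper_wait_event_timeout(
+ *			DRMID(phys_enc->parent),
+ *			phys_enc->hw_pp->idx - PINGPONG_0,
+ *			&phys_enc->pending_kickoff_wq,
+ *			&phys_enc->pending_kickoff_cnt,
+ *			KICKOFF_TIMEOUT_MS);
+ *	if (ret <= 0)
+ *		SDE_ERROR("kickoff timed out\n");
+ */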
 
 static inline enum sde_3d_blend_mode sde_encoder_helper_get_3d_blend_mode(
 		struct sde_encoder_phys *phys_enc)
@@ -352,4 +388,15 @@
 	return BLEND_3D_NONE;
 }
 
+/**
+ * sde_encoder_helper_split_config - split display configuration helper function
+ *	This helper function may be used by physical encoders to configure
+ *	the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum sde_intf setting
+ */
+void sde_encoder_helper_split_config(
+		struct sde_encoder_phys *phys_enc,
+		enum sde_intf interface);
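+
+/*
+ * Example (illustrative): a physical encoder calls this from its enable
+ * path with its own interface index, e.g. the cmd mode encoder uses:
+ *
+ *	sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
+ */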
+
 #endif /* __SDE_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 3f478fa..6e57c7d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -13,8 +13,6 @@
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/jiffies.h>
-
 #include "sde_encoder_phys.h"
 #include "sde_hw_interrupts.h"
 #include "sde_core_irq.h"
@@ -33,10 +31,6 @@
 #define to_sde_encoder_phys_cmd(x) \
 	container_of(x, struct sde_encoder_phys_cmd, base)
 
-#define DEV(phy_enc) (phy_enc->parent->dev)
-
-#define WAIT_TIMEOUT_MSEC			100
-
 /*
  * Tearcheck sync start and continue thresholds are empirically found
  * based on common panels In the future, may want to allow panels to override
@@ -86,9 +80,8 @@
 	/* Retrieve previously allocated HW Resources. Shouldn't fail */
 	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
 	for (i = 0; i <= instance; i++) {
-		sde_rm_get_hw(rm, &iter);
-		if (i == instance)
-			phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw;
+		if (sde_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
 	}
 
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
@@ -97,45 +90,29 @@
 		phys_enc->hw_ctl = NULL;
 		return;
 	}
-
-	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id,
-			SDE_HW_BLK_PINGPONG);
-	for (i = 0; i <= instance; i++) {
-		sde_rm_get_hw(rm, &iter);
-		if (i == instance)
-			cmd_enc->hw_pp = (struct sde_hw_pingpong *) iter.hw;
-	}
-
-	if (IS_ERR_OR_NULL(cmd_enc->hw_pp)) {
-		SDE_ERROR_CMDENC(cmd_enc, "failed to init pingpong: %ld\n",
-				PTR_ERR(cmd_enc->hw_pp));
-		cmd_enc->hw_pp = NULL;
-		phys_enc->hw_ctl = NULL;
-		return;
-	}
-
 }
 
 static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys_cmd *cmd_enc = arg;
 	struct sde_encoder_phys *phys_enc;
-	int new_pending_cnt;
+	unsigned long lock_flags;
+	int new_cnt;
 
 	if (!cmd_enc)
 		return;
 
 	phys_enc = &cmd_enc->base;
-	new_pending_cnt = atomic_dec_return(&cmd_enc->pending_cnt);
-	MSM_EVT(DEV(phys_enc), cmd_enc->hw_pp->idx, new_pending_cnt);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
 
 	/* Signal any waiting atomic commit thread */
-	wake_up_all(&cmd_enc->pp_tx_done_wq);
-
-	/* Trigger a pending flush */
-	if (phys_enc->parent_ops.handle_ready_for_kickoff)
-		phys_enc->parent_ops.handle_ready_for_kickoff(phys_enc->parent,
-			phys_enc);
+	wake_up_all(&phys_enc->pending_kickoff_wq);
 }
 
 static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
@@ -151,6 +128,78 @@
 			phys_enc);
 }
 
+static bool _sde_encoder_phys_is_ppsplit_slave(
+		struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	if (!phys_enc)
+		return false;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (topology == SDE_RM_TOPOLOGY_PPSPLIT &&
+			phys_enc->split_role == ENC_ROLE_SLAVE)
+		return true;
+
+	return false;
+}
+
+static int _sde_encoder_phys_cmd_wait_for_idle(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	u32 irq_status;
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	/* slave encoder doesn't enable for ppsplit */
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		return 0;
+
+	/* return EWOULDBLOCK since we know the wait isn't necessary */
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR_CMDENC(cmd_enc, "encoder is disabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	/* wait for previous kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+	if (ret <= 0) {
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				INTR_IDX_PINGPONG, true);
+		if (irq_status) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			SDE_DEBUG_CMDENC(cmd_enc,
+					"pp:%d done but irq not triggered\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			sde_encoder_phys_cmd_pp_tx_done_irq(cmd_enc,
+					INTR_IDX_PINGPONG);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
 static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys_cmd *cmd_enc = arg;
@@ -166,32 +215,32 @@
 }
 
 static int sde_encoder_phys_cmd_register_irq(struct sde_encoder_phys *phys_enc,
-	enum sde_intr_type intr_type, int *irq_idx,
+	enum sde_intr_type intr_type, int idx,
 	void (*irq_func)(void *, int), const char *irq_name)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_irq_callback irq_cb;
 	int ret = 0;
 
 	if (!phys_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return -EINVAL;
 	}
-	*irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms, intr_type,
-			cmd_enc->hw_pp->idx);
-	if (*irq_idx < 0) {
+
+	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, phys_enc->hw_pp->idx);
+	if (cmd_enc->irq_idx[idx] < 0) {
 		SDE_ERROR_CMDENC(cmd_enc,
 			"failed to lookup IRQ index for %s with pp=%d\n",
 			irq_name,
-			cmd_enc->hw_pp->idx - PINGPONG_0);
+			phys_enc->hw_pp->idx - PINGPONG_0);
 		return -EINVAL;
 	}
 
-	irq_cb.func = irq_func;
-	irq_cb.arg = cmd_enc;
-	ret = sde_core_irq_register_callback(phys_enc->sde_kms, *irq_idx,
-			&irq_cb);
+	cmd_enc->irq_cb[idx].func = irq_func;
+	cmd_enc->irq_cb[idx].arg = cmd_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
 	if (ret) {
 		SDE_ERROR_CMDENC(cmd_enc,
 				"failed to register IRQ callback %s\n",
@@ -199,32 +248,31 @@
 		return ret;
 	}
 
-	ret = sde_core_irq_enable(phys_enc->sde_kms, irq_idx, 1);
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
 	if (ret) {
 		SDE_ERROR_CMDENC(cmd_enc,
 			"failed to enable IRQ for %s, pp %d, irq_idx %d\n",
 			irq_name,
-			cmd_enc->hw_pp->idx - PINGPONG_0,
-			*irq_idx);
-		*irq_idx = -EINVAL;
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+		cmd_enc->irq_idx[idx] = -EINVAL;
 
 		/* Unregister callback on IRQ enable failure */
-		sde_core_irq_register_callback(phys_enc->sde_kms, *irq_idx,
-				NULL);
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
 		return ret;
 	}
 
 	SDE_DEBUG_CMDENC(cmd_enc, "registered IRQ %s for pp %d, irq_idx %d\n",
 			irq_name,
-			cmd_enc->hw_pp->idx - PINGPONG_0,
-			*irq_idx);
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
 
 	return ret;
 }
 
 static int sde_encoder_phys_cmd_unregister_irq(
-		struct sde_encoder_phys *phys_enc,
-		int irq_idx)
+		struct sde_encoder_phys *phys_enc, int idx)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
@@ -234,13 +282,13 @@
 		return -EINVAL;
 	}
 
-	sde_core_irq_disable(phys_enc->sde_kms, &irq_idx, 1);
-	sde_core_irq_register_callback(phys_enc->sde_kms, irq_idx,
-			NULL);
+	sde_core_irq_disable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms,
+			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
 
 	SDE_DEBUG_CMDENC(cmd_enc, "unregistered IRQ for pp %d, irq_idx %d\n",
-			cmd_enc->hw_pp->idx - PINGPONG_0,
-			irq_idx);
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
 
 	return 0;
 }
@@ -262,10 +310,10 @@
 		return;
 	}
 
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", cmd_enc->hw_pp->idx - PINGPONG_0);
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
 
-	if (!cmd_enc->hw_pp->ops.setup_tearcheck ||
-		!cmd_enc->hw_pp->ops.enable_tearcheck) {
+	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+		!phys_enc->hw_pp->ops.enable_tearcheck) {
 		SDE_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
 		return;
 	}
@@ -304,23 +352,23 @@
 
 	SDE_DEBUG_CMDENC(cmd_enc,
 		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
-		cmd_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
 		mode->vtotal, mode->vrefresh);
 	SDE_DEBUG_CMDENC(cmd_enc,
 		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
-		cmd_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
 		tc_cfg.rd_ptr_irq);
 	SDE_DEBUG_CMDENC(cmd_enc,
 		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
-		cmd_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
 		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
 	SDE_DEBUG_CMDENC(cmd_enc,
 		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
-		cmd_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
 		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
 
-	cmd_enc->hw_pp->ops.setup_tearcheck(cmd_enc->hw_pp, &tc_cfg);
-	cmd_enc->hw_pp->ops.enable_tearcheck(cmd_enc->hw_pp, tc_enable);
+	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
 }
 
 static void sde_encoder_phys_cmd_pingpong_config(
@@ -337,7 +385,7 @@
 	}
 
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
-			cmd_enc->hw_pp->idx - PINGPONG_0);
+			phys_enc->hw_pp->idx - PINGPONG_0);
 	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
 
 	intf_cfg.intf = cmd_enc->intf_idx;
@@ -350,34 +398,16 @@
 	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
 }
 
-static bool sde_encoder_phys_cmd_needs_split_flush(
+static bool sde_encoder_phys_cmd_needs_single_flush(
 		struct sde_encoder_phys *phys_enc)
 {
-	return false;
-}
+	enum sde_rm_topology_name topology;
 
-static void sde_encoder_phys_cmd_split_config(
-		struct sde_encoder_phys *phys_enc, bool enable)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_mdp *hw_mdptop = phys_enc->hw_mdptop;
-	struct split_pipe_cfg cfg = { 0 };
+	if (!phys_enc)
+		return false;
 
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	SDE_DEBUG_CMDENC(cmd_enc, "enable %d\n", enable);
-
-	cfg.en = enable;
-	cfg.mode = INTF_MODE_CMD;
-	cfg.intf = cmd_enc->intf_idx;
-	cfg.split_flush_en = enable &&
-		sde_encoder_phys_cmd_needs_split_flush(phys_enc);
-
-	if (hw_mdptop && hw_mdptop->ops.setup_split_pipe)
-		hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	return topology == SDE_RM_TOPOLOGY_PPSPLIT;
 }
 
 static int sde_encoder_phys_cmd_control_vblank_irq(
@@ -401,27 +431,18 @@
 			__builtin_return_address(0),
 			enable, atomic_read(&phys_enc->vblank_refcount));
 
-	MSM_EVTMSG(phys_enc->parent->dev, NULL, enable,
-			atomic_read(&phys_enc->vblank_refcount));
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			enable, atomic_read(&phys_enc->vblank_refcount));
 
 	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
 		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
 				SDE_IRQ_TYPE_PING_PONG_RD_PTR,
-				&cmd_enc->irq_idx[INTR_IDX_PINGPONG],
+				INTR_IDX_RDPTR,
 				sde_encoder_phys_cmd_pp_rd_ptr_irq,
 				"pp_rd_ptr");
 	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
 		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
-				cmd_enc->irq_idx[INTR_IDX_PINGPONG]);
-
-	if (enable)
-		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
-			SDE_IRQ_TYPE_PING_PONG_RD_PTR,
-			&cmd_enc->irq_idx[INTR_IDX_RDPTR],
-			sde_encoder_phys_cmd_pp_rd_ptr_irq, "pp_rd_ptr");
-	else
-		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
-			cmd_enc->irq_idx[INTR_IDX_RDPTR]);
+				INTR_IDX_RDPTR);
 
 end:
 	if (ret)
@@ -438,32 +459,30 @@
 		to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_hw_ctl *ctl;
 	u32 flush_mask;
-	int ret = 0;
+	int ret;
 
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
 		return;
 	}
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", cmd_enc->hw_pp->idx - PINGPONG_0);
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
 
-	if (WARN_ON(phys_enc->enable_state == SDE_ENC_ENABLED))
+	if (phys_enc->enable_state == SDE_ENC_ENABLED) {
+		SDE_ERROR("already enabled\n");
 		return;
+	}
 
-	/*
-	 * Only master configures master/slave configuration, so no slave check
-	 * In solo configuration, solo encoder needs to program no-split
-	 */
-	if (phys_enc->split_role == ENC_ROLE_MASTER)
-		sde_encoder_phys_cmd_split_config(phys_enc, true);
-	else if (phys_enc->split_role == ENC_ROLE_SOLO)
-		sde_encoder_phys_cmd_split_config(phys_enc, false);
+	sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
 
 	sde_encoder_phys_cmd_pingpong_config(phys_enc);
 
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		goto update_flush;
+
 	/* Both master and slave need to register for pp_tx_done */
 	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
 			SDE_IRQ_TYPE_PING_PONG_COMP,
-			&cmd_enc->irq_idx[INTR_IDX_PINGPONG],
+			INTR_IDX_PINGPONG,
 			sde_encoder_phys_cmd_pp_tx_done_irq,
 			"pp_tx_done");
 	if (ret)
@@ -472,22 +491,23 @@
 	ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
 	if (ret) {
 		sde_encoder_phys_cmd_unregister_irq(phys_enc,
-				cmd_enc->irq_idx[INTR_IDX_PINGPONG]);
+				INTR_IDX_PINGPONG);
 		return;
 	}
 
 	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
 			SDE_IRQ_TYPE_INTF_UNDER_RUN,
-			&cmd_enc->irq_idx[INTR_IDX_UNDERRUN],
+			INTR_IDX_UNDERRUN,
 			sde_encoder_phys_cmd_underrun_irq,
 			"underrun");
 	if (ret) {
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
 		sde_encoder_phys_cmd_unregister_irq(phys_enc,
-				cmd_enc->irq_idx[INTR_IDX_UNDERRUN]);
+				INTR_IDX_PINGPONG);
 		return;
 	}
 
+update_flush:
 	ctl = phys_enc->hw_ctl;
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
@@ -501,24 +521,39 @@
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
+	int ret;
 
 	if (!phys_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", cmd_enc->hw_pp->idx - PINGPONG_0);
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
 
-	if (WARN_ON(phys_enc->enable_state == SDE_ENC_DISABLED))
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR_CMDENC(cmd_enc, "already disabled\n");
 		return;
+	}
 
-	sde_encoder_phys_cmd_unregister_irq(phys_enc,
-			cmd_enc->irq_idx[INTR_IDX_UNDERRUN]);
-	sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
-	sde_encoder_phys_cmd_unregister_irq(phys_enc,
-			cmd_enc->irq_idx[INTR_IDX_PINGPONG]);
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
 
-	atomic_set(&cmd_enc->pending_cnt, 0);
-	wake_up_all(&cmd_enc->pp_tx_done_wq);
+	if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc)) {
+		ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+		if (ret) {
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			SDE_ERROR_CMDENC(cmd_enc,
+					"pp %d failed wait for idle, %d\n",
+					phys_enc->hw_pp->idx - PINGPONG_0, ret);
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0, ret);
+		}
+
+		sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_UNDERRUN);
+		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_PINGPONG);
+	}
+
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 
 	if (atomic_read(&phys_enc->vblank_refcount))
@@ -569,32 +604,31 @@
 }
 
 static void sde_encoder_phys_cmd_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc,
-		bool *need_to_wait)
+		struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
-	int new_pending_cnt;
+	int ret;
 
 	if (!phys_enc) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
-	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", cmd_enc->hw_pp->idx - PINGPONG_0);
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
 
 	/*
 	 * Mark kickoff request as outstanding. If there are more than one,
 	 * outstanding, then we have to wait for the previous one to complete
 	 */
-	new_pending_cnt = atomic_inc_return(&cmd_enc->pending_cnt);
-	*need_to_wait = new_pending_cnt != 1;
-
-	if (*need_to_wait)
-		SDE_DEBUG_CMDENC(cmd_enc,
-				"pp %d needs to wait, new_pending_cnt %d",
-				cmd_enc->hw_pp->idx - PINGPONG_0,
-				new_pending_cnt);
-	MSM_EVT(DEV(phys_enc), cmd_enc->hw_pp->idx, new_pending_cnt);
+	ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (ret) {
+		/* force pending_kickoff_cnt 0 to discard failed kickoff */
+		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+		SDE_EVT32(DRMID(phys_enc->parent),
+				phys_enc->hw_pp->idx - PINGPONG_0);
+		SDE_ERROR("failed wait_for_idle: %d\n", ret);
+	}
 }
 
 static void sde_encoder_phys_cmd_init_ops(
@@ -611,7 +645,7 @@
 	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
 	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
 	ops->trigger_start = sde_encoder_helper_trigger_start;
-	ops->needs_split_flush = sde_encoder_phys_cmd_needs_split_flush;
+	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
@@ -620,7 +654,7 @@
 	struct sde_encoder_phys *phys_enc = NULL;
 	struct sde_encoder_phys_cmd *cmd_enc = NULL;
 	struct sde_hw_mdp *hw_mdp;
-	int ret = 0;
+	int i, ret = 0;
 
 	SDE_DEBUG("intf %d\n", p->intf_idx - INTF_0);
 
@@ -649,13 +683,14 @@
 	phys_enc->sde_kms = p->sde_kms;
 	phys_enc->split_role = p->split_role;
 	phys_enc->intf_mode = INTF_MODE_CMD;
-	spin_lock_init(&phys_enc->spin_lock);
+	phys_enc->enc_spinlock = p->enc_spinlock;
 	cmd_enc->stream_sel = 0;
 	phys_enc->enable_state = SDE_ENC_DISABLED;
-	atomic_set(&cmd_enc->pending_cnt, 0);
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
 	atomic_set(&phys_enc->vblank_refcount, 0);
-
-	init_waitqueue_head(&cmd_enc->pp_tx_done_wq);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 
 	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index b94f64f..abad24d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -11,8 +11,6 @@
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/jiffies.h>
-
 #include "sde_encoder_phys.h"
 #include "sde_hw_interrupts.h"
 #include "sde_core_irq.h"
@@ -30,15 +28,9 @@
 		(e) && (e)->hw_intf ? \
 		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
 
-#define VBLANK_TIMEOUT msecs_to_jiffies(100)
-
 #define to_sde_encoder_phys_vid(x) \
 	container_of(x, struct sde_encoder_phys_vid, base)
 
-#define DEV(phy_enc) (phy_enc->parent->dev)
-
-#define WAIT_TIMEOUT_MSEC 100
-
 static bool sde_encoder_phys_vid_is_master(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -50,22 +42,6 @@
 	return ret;
 }
 
-static void sde_encoder_phys_vid_wait_for_vblank(
-		struct sde_encoder_phys_vid *vid_enc)
-{
-	int rc = 0;
-
-	if (!vid_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-	SDE_DEBUG_VIDENC(vid_enc, "\n");
-	rc = wait_for_completion_timeout(&vid_enc->vblank_completion,
-			VBLANK_TIMEOUT);
-	if (rc == 0)
-		SDE_ERROR_VIDENC(vid_enc, "timed out waiting for vblank irq\n");
-}
-
 static void drm_mode_to_intf_timing_params(
 		const struct sde_encoder_phys_vid *vid_enc,
 		const struct drm_display_mode *mode,
@@ -226,9 +202,9 @@
 		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
 		vfp_fetch_lines, vfp_fetch_start_vsync_counter);
 
-	spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 	vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
-	spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 }
 
 static bool sde_encoder_phys_vid_mode_fixup(
@@ -294,11 +270,11 @@
 	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
 	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
 
-	spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 	vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
 			&timing_params, fmt);
 	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
-	spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
 	programmable_fetch_config(phys_enc, &timing_params);
 }
@@ -307,6 +283,8 @@
 {
 	struct sde_encoder_phys_vid *vid_enc = arg;
 	struct sde_encoder_phys *phys_enc;
+	unsigned long lock_flags;
+	int new_cnt;
 
 	if (!vid_enc)
 		return;
@@ -316,8 +294,14 @@
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 				phys_enc);
 
-	/* signal VBLANK completion */
-	complete_all(&vid_enc->vblank_completion);
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+			new_cnt);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
 }
 
 static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
@@ -334,44 +318,17 @@
 			phys_enc);
 }
 
-static bool sde_encoder_phys_vid_needs_split_flush(
+static bool sde_encoder_phys_vid_needs_single_flush(
 		struct sde_encoder_phys *phys_enc)
 {
 	return phys_enc && phys_enc->split_role != ENC_ROLE_SOLO;
 }
 
-static void _sde_encoder_phys_vid_split_config(
-		struct sde_encoder_phys *phys_enc, bool enable)
-{
-	struct sde_encoder_phys_vid *vid_enc =
-		to_sde_encoder_phys_vid(phys_enc);
-	struct sde_hw_mdp *hw_mdptop = phys_enc->hw_mdptop;
-	struct split_pipe_cfg cfg = { 0 };
-
-	SDE_DEBUG_VIDENC(vid_enc, "enable %d\n", enable);
-
-	cfg.en = enable;
-	cfg.mode = INTF_MODE_VIDEO;
-	cfg.intf = vid_enc->hw_intf->idx;
-	cfg.split_flush_en = enable &&
-		sde_encoder_phys_vid_needs_split_flush(phys_enc);
-
-	/* Configure split pipe control to handle master/slave triggering */
-	if (hw_mdptop && hw_mdptop->ops.setup_split_pipe) {
-		unsigned long lock_flags;
-
-		spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
-		hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
-		spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
-	}
-}
-
 static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
-	enum sde_intr_type intr_type, int *irq_idx,
+	enum sde_intr_type intr_type, int idx,
 	void (*irq_func)(void *, int), const char *irq_name)
 {
 	struct sde_encoder_phys_vid *vid_enc;
-	struct sde_irq_callback irq_cb;
 	int ret = 0;
 
 	if (!phys_enc) {
@@ -380,46 +337,46 @@
 	}
 
 	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	*irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms, intr_type,
-			vid_enc->hw_intf->idx);
-	if (*irq_idx < 0) {
+	vid_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, vid_enc->hw_intf->idx);
+	if (vid_enc->irq_idx[idx] < 0) {
 		SDE_ERROR_VIDENC(vid_enc,
 			"failed to lookup IRQ index for %s type:%d\n", irq_name,
 			intr_type);
 		return -EINVAL;
 	}
 
-	irq_cb.func = irq_func;
-	irq_cb.arg = vid_enc;
-	ret = sde_core_irq_register_callback(phys_enc->sde_kms, *irq_idx,
-			&irq_cb);
+	vid_enc->irq_cb[idx].func = irq_func;
+	vid_enc->irq_cb[idx].arg = vid_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
 	if (ret) {
 		SDE_ERROR_VIDENC(vid_enc,
 			"failed to register IRQ callback for %s\n", irq_name);
 		return ret;
 	}
 
-	ret = sde_core_irq_enable(phys_enc->sde_kms, irq_idx, true);
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
 	if (ret) {
 		SDE_ERROR_VIDENC(vid_enc,
 			"enable IRQ for intr:%s failed, irq_idx %d\n",
-			irq_name, *irq_idx);
-		*irq_idx = -EINVAL;
+			irq_name, vid_enc->irq_idx[idx]);
+		vid_enc->irq_idx[idx] = -EINVAL;
 
 		/* unregister callback on IRQ enable failure */
-		sde_core_irq_register_callback(phys_enc->sde_kms,
-						*irq_idx, NULL);
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
 		return ret;
 	}
 
 	SDE_DEBUG_VIDENC(vid_enc, "registered irq %s idx: %d\n",
-						irq_name, *irq_idx);
+			irq_name, vid_enc->irq_idx[idx]);
 
 	return ret;
 }
 
 static int sde_encoder_phys_vid_unregister_irq(
-	struct sde_encoder_phys *phys_enc, int irq_idx)
+	struct sde_encoder_phys *phys_enc, int idx)
 {
 	struct sde_encoder_phys_vid *vid_enc;
 
@@ -429,11 +386,12 @@
 	}
 
 	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	sde_core_irq_disable(phys_enc->sde_kms, &irq_idx, 1);
+	sde_core_irq_disable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
 
-	sde_core_irq_register_callback(phys_enc->sde_kms, irq_idx, NULL);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms,
+			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
 
-	SDE_DEBUG_VIDENC(vid_enc, "unregistered %d\n", irq_idx);
+	SDE_DEBUG_VIDENC(vid_enc, "unregistered %d\n", vid_enc->irq_idx[idx]);
 
 end:
 	return 0;
@@ -444,16 +402,17 @@
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adj_mode)
 {
-	struct sde_rm *rm = &phys_enc->sde_kms->rm;
+	struct sde_rm *rm;
 	struct sde_rm_hw_iter iter;
 	int i, instance;
 	struct sde_encoder_phys_vid *vid_enc;
 
-	if (!phys_enc) {
+	if (!phys_enc || !phys_enc->sde_kms) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 
+	rm = &phys_enc->sde_kms->rm;
 	vid_enc = to_sde_encoder_phys_vid(phys_enc);
 	phys_enc->cached_mode = *adj_mode;
 	SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
@@ -464,11 +423,9 @@
 	/* Retrieve previously allocated HW Resources. Shouldn't fail */
 	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
 	for (i = 0; i <= instance; i++) {
-		sde_rm_get_hw(rm, &iter);
-		if (i == instance)
-			phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw;
+		if (sde_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
 	}
-
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
 		SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
 				PTR_ERR(phys_enc->hw_ctl));
@@ -499,17 +456,17 @@
 			__builtin_return_address(0),
 			enable, atomic_read(&phys_enc->vblank_refcount));
 
-	MSM_EVTMSG(phys_enc->parent->dev, NULL, enable,
+	SDE_EVT32(DRMID(phys_enc->parent), enable,
 			atomic_read(&phys_enc->vblank_refcount));
 
 	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
 		ret = sde_encoder_phys_vid_register_irq(phys_enc,
 			SDE_IRQ_TYPE_INTF_VSYNC,
-			&vid_enc->irq_idx[INTR_IDX_VSYNC],
+			INTR_IDX_VSYNC,
 			sde_encoder_phys_vid_vblank_irq, "vsync_irq");
 	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
 		ret = sde_encoder_phys_vid_unregister_irq(phys_enc,
-			vid_enc->irq_idx[INTR_IDX_VSYNC]);
+			INTR_IDX_VSYNC);
 
 	if (ret)
 		SDE_ERROR_VIDENC(vid_enc,
@@ -546,10 +503,7 @@
 	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
 		return;
 
-	if (phys_enc->split_role == ENC_ROLE_MASTER)
-		_sde_encoder_phys_vid_split_config(phys_enc, true);
-	else if (phys_enc->split_role == ENC_ROLE_SOLO)
-		_sde_encoder_phys_vid_split_config(phys_enc, false);
+	sde_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
 
 	sde_encoder_phys_vid_setup_timing_engine(phys_enc);
 	ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
@@ -558,7 +512,7 @@
 
 	ret = sde_encoder_phys_vid_register_irq(phys_enc,
 		SDE_IRQ_TYPE_INTF_UNDER_RUN,
-		&vid_enc->irq_idx[INTR_IDX_UNDERRUN],
+		INTR_IDX_UNDERRUN,
 		sde_encoder_phys_vid_underrun_irq, "underrun");
 	if (ret) {
 		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
@@ -579,59 +533,6 @@
 	return;
 }
 
-static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
-{
-	unsigned long lock_flags;
-	struct sde_encoder_phys_vid *vid_enc;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return;
-	}
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
-				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
-		return;
-	}
-
-	SDE_DEBUG_VIDENC(vid_enc, "\n");
-
-	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
-		return;
-
-	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
-		SDE_ERROR("already disabled\n");
-		return;
-	}
-
-	spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
-	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
-	reinit_completion(&vid_enc->vblank_completion);
-	phys_enc->enable_state = SDE_ENC_DISABLED;
-	spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
-
-	/*
-	 * Wait for a vsync so we know the ENABLE=0 latched before
-	 * the (connector) source of the vsync's gets disabled,
-	 * otherwise we end up in a funny state if we re-enable
-	 * before the disable latches, which results that some of
-	 * the settings changes for the new modeset (like new
-	 * scanout buffer) don't latch properly..
-	 */
-	if (sde_encoder_phys_vid_is_master(phys_enc)) {
-		sde_encoder_phys_vid_wait_for_vblank(vid_enc);
-		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
-	}
-
-	if (atomic_read(&phys_enc->vblank_refcount))
-		SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
-				phys_enc->parent->base.id,
-				phys_enc->split_role,
-				atomic_read(&phys_enc->vblank_refcount));
-}
-
 static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_vid *vid_enc;
@@ -672,9 +573,10 @@
 static int sde_encoder_phys_vid_wait_for_commit_done(
 		struct sde_encoder_phys *phys_enc)
 {
-	unsigned long ret;
 	struct sde_encoder_phys_vid *vid_enc =
 			to_sde_encoder_phys_vid(phys_enc);
+	u32 irq_status;
+	int ret;
 
 	if (!sde_encoder_phys_vid_is_master(phys_enc))
 		return 0;
@@ -684,34 +586,99 @@
 		return -EWOULDBLOCK;
 	}
 
-	MSM_EVTMSG(DEV(phys_enc), "waiting", 0, 0);
+	SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+			SDE_EVTLOG_FUNC_ENTRY);
 
-	ret = wait_for_completion_timeout(&vid_enc->vblank_completion,
-			msecs_to_jiffies(WAIT_TIMEOUT_MSEC));
-	if (!ret) {
-		SDE_DEBUG_VIDENC(vid_enc, "wait %u ms timed out\n",
-				WAIT_TIMEOUT_MSEC);
-		MSM_EVTMSG(DEV(phys_enc), "wait_timeout", 0, 0);
-		return -ETIMEDOUT;
+	/* Wait for kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			vid_enc->hw_intf->idx - INTF_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+	if (ret <= 0) {
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				INTR_IDX_VSYNC, true);
+		if (irq_status) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0);
+			SDE_DEBUG_VIDENC(vid_enc, "done, irq not triggered\n");
+			sde_encoder_phys_vid_vblank_irq(vid_enc,
+					INTR_IDX_VSYNC);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0);
+			SDE_ERROR_VIDENC(vid_enc, "kickoff timed out\n");
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		ret = 0;
 	}
 
-	MSM_EVTMSG(DEV(phys_enc), "wait_done", 0, 0);
-
 	return 0;
 }
 
-static void sde_encoder_phys_vid_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc,
-		bool *need_to_wait)
+static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_vid *vid_enc =
-			to_sde_encoder_phys_vid(phys_enc);
+	struct sde_encoder_phys_vid *vid_enc;
+	unsigned long lock_flags;
+	int ret;
 
-	/* Vid encoder is simple, kickoff is immediate */
-	*need_to_wait = false;
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
 
-	/* Reset completion to wait for the next vblank */
-	reinit_completion(&vid_enc->vblank_completion);
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("already disabled\n");
+		return;
+	}
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+	if (sde_encoder_phys_vid_is_master(phys_enc))
+		sde_encoder_phys_inc_pending(phys_enc);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/*
+	 * Wait for a vsync so we know the ENABLE=0 write has latched
+	 * before the (connector) source of the vsync gets disabled.
+	 * Otherwise we end up in an inconsistent state if we re-enable
+	 * before the disable latches, and some of the settings for the
+	 * new modeset (such as the new scanout buffer) do not latch
+	 * properly.
+	 */
+	if (sde_encoder_phys_vid_is_master(phys_enc)) {
+		ret = sde_encoder_phys_vid_wait_for_commit_done(phys_enc);
+		if (ret) {
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			SDE_ERROR_VIDENC(vid_enc,
+					"failure waiting for disable: %d\n",
+					ret);
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0, ret);
+		}
+		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+	}
+
+	if (atomic_read(&phys_enc->vblank_refcount))
+		SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
+				atomic_read(&phys_enc->vblank_refcount));
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
 static void sde_encoder_phys_vid_handle_post_kickoff(
@@ -733,14 +700,35 @@
 	 * Video encoders need to turn on their interfaces now
 	 */
 	if (phys_enc->enable_state == SDE_ENC_ENABLING) {
-		MSM_EVT(DEV(phys_enc), 0, 0);
-		spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
+		SDE_EVT32(DRMID(phys_enc->parent),
+				vid_enc->hw_intf->idx - INTF_0);
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 		vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
-		spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 		phys_enc->enable_state = SDE_ENC_ENABLED;
 	}
 }
 
+static void sde_encoder_phys_vid_setup_misr(struct sde_encoder_phys *phys_enc,
+			struct sde_misr_params *misr_map)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+		to_sde_encoder_phys_vid(phys_enc);
+
+	if (vid_enc && vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+		vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf, misr_map);
+}
+
+static void sde_encoder_phys_vid_collect_misr(struct sde_encoder_phys *phys_enc,
+			struct sde_misr_params *misr_map)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+
+	if (vid_enc && vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr)
+		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf, misr_map);
+}
+
 static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
 {
 	ops->is_master = sde_encoder_phys_vid_is_master;
@@ -752,9 +740,10 @@
 	ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
 	ops->control_vblank_irq = sde_encoder_phys_vid_control_vblank_irq;
 	ops->wait_for_commit_done = sde_encoder_phys_vid_wait_for_commit_done;
-	ops->prepare_for_kickoff = sde_encoder_phys_vid_prepare_for_kickoff;
 	ops->handle_post_kickoff = sde_encoder_phys_vid_handle_post_kickoff;
-	ops->needs_split_flush = sde_encoder_phys_vid_needs_split_flush;
+	ops->needs_single_flush = sde_encoder_phys_vid_needs_single_flush;
+	ops->setup_misr = sde_encoder_phys_vid_setup_misr;
+	ops->collect_misr = sde_encoder_phys_vid_collect_misr;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_vid_init(
@@ -764,7 +753,7 @@
 	struct sde_encoder_phys_vid *vid_enc = NULL;
 	struct sde_rm_hw_iter iter;
 	struct sde_hw_mdp *hw_mdp;
-	int ret = 0;
+	int i, ret = 0;
 
 	if (!p) {
 		ret = -EINVAL;
@@ -776,7 +765,6 @@
 		ret = -ENOMEM;
 		goto fail;
 	}
-	init_completion(&vid_enc->vblank_completion);
 
 	phys_enc = &vid_enc->base;
 
@@ -809,6 +797,13 @@
 		goto fail;
 	}
 
+	phys_enc->misr_map = kzalloc(sizeof(struct sde_misr_params),
+						GFP_KERNEL);
+	if (!phys_enc->misr_map) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
 	SDE_DEBUG_VIDENC(vid_enc, "\n");
 
 	sde_encoder_phys_vid_init_ops(&phys_enc->ops);
@@ -817,9 +812,12 @@
 	phys_enc->sde_kms = p->sde_kms;
 	phys_enc->split_role = p->split_role;
 	phys_enc->intf_mode = INTF_MODE_VIDEO;
-	spin_lock_init(&phys_enc->spin_lock);
-	init_completion(&vid_enc->vblank_completion);
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&vid_enc->irq_cb[i].list);
 	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 
 	SDE_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
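
The video-encoder changes above replace the per-encoder vblank completion with an atomic
pending-kickoff counter and a wait queue: the commit path increments the counter, the vblank
IRQ decrements it and wakes waiters, and wait_for_commit_done waits for it to drain. The
following is a minimal sketch of that pattern in isolation; only the field names
pending_kickoff_cnt and pending_kickoff_wq are taken from the patch, the surrounding struct
and helpers are hypothetical.

    #include <linux/atomic.h>
    #include <linux/wait.h>
    #include <linux/jiffies.h>

    struct kickoff_ctx {
            atomic_t pending_kickoff_cnt;
            wait_queue_head_t pending_kickoff_wq;
    };

    static void kickoff_submit(struct kickoff_ctx *ctx)
    {
            /* one more frame queued to the hardware */
            atomic_inc(&ctx->pending_kickoff_cnt);
    }

    static void kickoff_vblank_irq(struct kickoff_ctx *ctx)
    {
            /* never drop below zero, mirroring atomic_add_unless(..., -1, 0) */
            atomic_add_unless(&ctx->pending_kickoff_cnt, -1, 0);
            wake_up_all(&ctx->pending_kickoff_wq);
    }

    static int kickoff_wait(struct kickoff_ctx *ctx, unsigned long timeout_ms)
    {
            long rc = wait_event_timeout(ctx->pending_kickoff_wq,
                            atomic_read(&ctx->pending_kickoff_cnt) == 0,
                            msecs_to_jiffies(timeout_ms));

            return rc ? 0 : -ETIMEDOUT;
    }

Pairing the counter decrement with wake_up_all() in the IRQ handler is what lets the waiter
use a simple condition on the atomic counter instead of a one-shot completion.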
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 665b044..768f59c 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -13,8 +13,6 @@
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/jiffies.h>
 #include <linux/debugfs.h>
 
 #include "sde_encoder_phys.h"
@@ -25,13 +23,10 @@
 #include "sde_wb.h"
 #include "sde_vbif.h"
 
-/* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define WAIT_TIMEOUT_MSEC			84
-
 #define to_sde_encoder_phys_wb(x) \
 	container_of(x, struct sde_encoder_phys_wb, base)
 
-#define DEV(phy_enc) (phy_enc->parent->dev)
+#define WBID(wb_enc) ((wb_enc) ? (wb_enc)->wb_dev->wb_idx : -1)
 
 /**
  * sde_encoder_phys_wb_is_master - report wb always as master encoder
@@ -121,7 +116,8 @@
 	cdm_cfg->output_height = wb_roi->h;
 	cdm_cfg->output_fmt = format;
 	cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
-	cdm_cfg->output_bit_depth = CDM_CDWN_OUTPUT_8BIT;
+	cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
+		CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
 
 	/* enable 10 bit logic */
 	switch (cdm_cfg->output_fmt->chroma_sample) {
@@ -517,8 +513,8 @@
 		return 0;
 
 	sde_core_irq_disable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
-	sde_core_irq_register_callback(phys_enc->sde_kms, wb_enc->irq_idx,
-			NULL);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms, wb_enc->irq_idx,
+			&wb_enc->irq_cb);
 
 	SDE_DEBUG("un-register IRQ for wb %d, irq_idx=%d\n",
 			hw_wb->idx - WB_0,
@@ -555,7 +551,7 @@
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
-	struct sde_irq_callback irq_cb;
+	struct sde_irq_callback *irq_cb = &wb_enc->irq_cb;
 	enum sde_intr_type intr_type;
 	int ret = 0;
 
@@ -572,10 +568,10 @@
 		return -EINVAL;
 	}
 
-	irq_cb.func = sde_encoder_phys_wb_done_irq;
-	irq_cb.arg = wb_enc;
+	irq_cb->func = sde_encoder_phys_wb_done_irq;
+	irq_cb->arg = wb_enc;
 	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
-			wb_enc->irq_idx, &irq_cb);
+			wb_enc->irq_idx, irq_cb);
 	if (ret) {
 		SDE_ERROR("failed to register IRQ callback WB_DONE\n");
 		return ret;
@@ -590,8 +586,8 @@
 		wb_enc->irq_idx = -EINVAL;
 
 		/* Unregister callback on IRQ enable failure */
-		sde_core_irq_register_callback(phys_enc->sde_kms,
-				wb_enc->irq_idx, NULL);
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				wb_enc->irq_idx, irq_cb);
 		return ret;
 	}
 
@@ -675,13 +671,14 @@
 	if (WARN_ON(phys_enc->enable_state != SDE_ENC_ENABLED))
 		return -EWOULDBLOCK;
 
-	MSM_EVT(DEV(phys_enc), wb_enc->frame_count, 0);
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count);
 
 	ret = wait_for_completion_timeout(&wb_enc->wbdone_complete,
-			msecs_to_jiffies(wb_enc->wbdone_timeout));
+			KICKOFF_TIMEOUT_JIFFIES);
 
 	if (!ret) {
-		MSM_EVT(DEV(phys_enc), wb_enc->frame_count, 0);
+		SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
+				wb_enc->frame_count);
 
 		irq_status = sde_core_irq_read(phys_enc->sde_kms,
 				wb_enc->irq_idx, true);
@@ -719,7 +716,8 @@
 			wb_enc->wb_dev->wb_idx - WB_0, wb_time);
 	}
 
-	MSM_EVT(DEV(phys_enc), wb_enc->frame_count, wb_time);
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count,
+			wb_time);
 
 	return rc;
 }
@@ -727,11 +725,9 @@
 /**
  * sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
  * @phys_enc:	Pointer to physical encoder
- * @need_to_wait:	 Wait for next submission
  */
 static void sde_encoder_phys_wb_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc,
-		bool *need_to_wait)
+		struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	int ret;
@@ -739,8 +735,6 @@
 	SDE_DEBUG("[wb:%d,%u]\n", wb_enc->hw_wb->idx - WB_0,
 			wb_enc->kickoff_count);
 
-	*need_to_wait = false;
-
 	reinit_completion(&wb_enc->wbdone_complete);
 
 	ret = sde_encoder_phys_wb_register_irq(phys_enc);
@@ -759,7 +753,7 @@
 	/* vote for iommu/clk/bus */
 	wb_enc->start_time = ktime_get();
 
-	MSM_EVT(DEV(phys_enc), *need_to_wait, wb_enc->kickoff_count);
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->kickoff_count);
 }
 
 /**
@@ -773,7 +767,7 @@
 
 	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
 
-	MSM_EVT(DEV(phys_enc), 0, 0);
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc));
 }
 
 /**
@@ -1009,7 +1003,7 @@
 		goto fail_alloc;
 	}
 	wb_enc->irq_idx = -EINVAL;
-	wb_enc->wbdone_timeout = WAIT_TIMEOUT_MSEC;
+	wb_enc->wbdone_timeout = KICKOFF_TIMEOUT_MS;
 	init_completion(&wb_enc->wbdone_complete);
 
 	phys_enc = &wb_enc->base;
@@ -1069,7 +1063,8 @@
 	phys_enc->split_role = p->split_role;
 	phys_enc->intf_mode = INTF_MODE_WB_LINE;
 	phys_enc->intf_idx = p->intf_idx;
-	spin_lock_init(&phys_enc->spin_lock);
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	INIT_LIST_HEAD(&wb_enc->irq_cb.list);
 
 	ret = sde_encoder_phys_wb_init_debugfs(phys_enc, p->sde_kms);
 	if (ret) {
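
The writeback changes adopt the same IRQ-callback convention as the video encoder: the
struct sde_irq_callback is now embedded in the encoder rather than allocated on the stack,
so the identical node can later be passed to sde_core_irq_unregister_callback(). The sketch
below is illustrative only (the types and helpers are stand-ins, not driver code); it shows
why the callback storage must outlive the registration when the core keeps the node on a
list.

    #include <linux/list.h>

    struct irq_callback {
            struct list_head list;          /* linked into the core's handler list */
            void (*func)(void *arg, int irq_idx);
            void *arg;
    };

    struct wb_encoder_example {
            int irq_idx;
            struct irq_callback irq_cb;     /* lifetime == encoder lifetime */
    };

    static void example_register(struct list_head *core_list,
                    struct wb_encoder_example *wb,
                    void (*handler)(void *, int))
    {
            wb->irq_cb.func = handler;
            wb->irq_cb.arg = wb;
            /* a stack-local callback here would dangle once this returns */
            list_add_tail(&wb->irq_cb.list, core_list);
    }

    static void example_unregister(struct wb_encoder_example *wb)
    {
            /* unregister must receive the same node that was registered */
            list_del_init(&wb->irq_cb.list);
    }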
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 23e5614..6db6f98 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -105,14 +105,14 @@
 /**
  * SDE_FENCE_TIMELINE_NAME - macro for accessing s/w timeline's name
  * @fence: Pointer to sde fence structure
  * Returns: Pointer to timeline name string
  */
 #define SDE_FENCE_TIMELINE_NAME(fence) \
 	(((struct sw_sync_timeline *)fence->timeline)->obj.name)
 
-int sde_fence_init(void *dev,
-		struct sde_fence *fence,
-		const char *name)
+int sde_fence_init(struct sde_fence *fence,
+		const char *name,
+		uint32_t drm_id)
 {
 	if (!fence) {
 		SDE_ERROR("invalid argument(s)\n");
@@ -125,9 +126,9 @@
 		return -ENOMEM;
 	}
 
-	fence->dev = dev;
 	fence->commit_count = 0;
 	fence->done_count = 0;
+	fence->drm_id = drm_id;
 
 	mutex_init(&fence->fence_lock);
 	return 0;
@@ -155,10 +156,7 @@
 
 	mutex_lock(&fence->fence_lock);
 	++fence->commit_count;
-	MSM_EVTMSG(fence->dev,
-			SDE_FENCE_TIMELINE_NAME(fence),
-			fence->commit_count,
-			fence->done_count);
+	SDE_EVT32(fence->drm_id, fence->commit_count, fence->done_count);
 	mutex_unlock(&fence->fence_lock);
 	return 0;
 }
@@ -187,10 +185,7 @@
 				trigger_value);
 		*val = fd;
 
-		MSM_EVTMSG(fence->dev,
-				SDE_FENCE_TIMELINE_NAME(fence),
-				trigger_value,
-				fd);
+		SDE_EVT32(fence->drm_id, trigger_value, fd);
 		mutex_unlock(&fence->fence_lock);
 
 		if (fd >= 0)
@@ -228,11 +223,10 @@
 		else
 			sw_sync_timeline_inc(fence->timeline, (int)val);
 	}
-	MSM_EVTMSG(fence->dev,
-			SDE_FENCE_TIMELINE_NAME(fence),
-			fence->done_count,
-			((struct sw_sync_timeline *)
-				fence->timeline)->value);
+
+	SDE_EVT32(fence->drm_id, fence->done_count,
+			((struct sw_sync_timeline *) fence->timeline)->value);
+
 	mutex_unlock(&fence->fence_lock);
 }
 #endif
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index b5980b4..113d16b 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -85,30 +85,30 @@
 /**
  * struct sde_fence - output fence container structure
  * @timeline: Pointer to fence timeline
- * @dev: Pointer to drm device structure
  * @commit_count: Number of detected commits since bootup
  * @done_count: Number of completed commits since bootup
+ * @drm_id: ID number of owning DRM Object
  * @fence_lock: Mutex object to protect local fence variables
  */
 struct sde_fence {
 	void *timeline;
-	void *dev;
 	int32_t commit_count;
 	int32_t done_count;
+	uint32_t drm_id;
 	struct mutex fence_lock;
 };
 
 #if IS_ENABLED(CONFIG_SW_SYNC)
 /**
  * sde_fence_init - initialize fence object
- * @dev: Pointer to drm device structure
  * @fence: Pointer to crtc fence object
+ * @drm_id: ID number of owning DRM Object
  * @name: Timeline name
  * Returns: Zero on success
  */
-int sde_fence_init(void *dev,
-		struct sde_fence *fence,
-		const char *name);
+int sde_fence_init(struct sde_fence *fence,
+		const char *name,
+		uint32_t drm_id);
 
 /**
  * sde_fence_deinit - deinit fence container
@@ -139,9 +139,9 @@
  */
 void sde_fence_signal(struct sde_fence *fence, bool is_error);
 #else
-static inline int sde_fence_init(void *dev,
-		struct sde_fence *fence,
-		const char *name)
+static inline int sde_fence_init(struct sde_fence *fence,
+		const char *name,
+		uint32_t drm_id)
 {
 	/* do nothing */
 	return 0;
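
With the dev pointer removed, callers of sde_fence_init() now pass the owning DRM object's
id so event-log entries can be correlated without dereferencing a device pointer. A hedged
usage sketch follows; the wrapper function, the crtc argument, and the timeline name are
illustrative, only the sde_fence_init() signature itself comes from the header above.

    #include <drm/drm_crtc.h>
    #include "sde_fence.h"

    /* Illustrative caller: tie the fence timeline to the CRTC's DRM object id. */
    static int example_crtc_fence_setup(struct drm_crtc *crtc,
                    struct sde_fence *fence)
    {
            return sde_fence_init(fence, "crtc_timeline", crtc->base.id);
    }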
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 5895158..41180f5 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -64,7 +64,7 @@
 	.num_planes = np                                                  \
 }
 
-#define PSEDUO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)      \
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)      \
 {                                                                         \
 	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
 	.fetch_planes = SDE_PLANE_PSEUDO_PLANAR,                          \
@@ -112,6 +112,12 @@
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
+	INTERLEAVED_RGB_FMT(XBGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, 0,
+		SDE_FETCH_LINEAR, 1),
+
 	INTERLEAVED_RGB_FMT(RGBA8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
@@ -262,25 +268,73 @@
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
-	PSEDUO_YUV_FMT(NV12,
+	INTERLEAVED_RGB_FMT(BGRA1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ARGB2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_LINEAR, 1),
+
+	PSEUDO_YUV_FMT(NV12,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C1_B_Cb, C2_R_Cr,
 		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
 		SDE_FETCH_LINEAR, 2),
 
-	PSEDUO_YUV_FMT(NV21,
+	PSEUDO_YUV_FMT(NV21,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C1_B_Cb,
 		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
 		SDE_FETCH_LINEAR, 2),
 
-	PSEDUO_YUV_FMT(NV16,
+	PSEUDO_YUV_FMT(NV16,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C1_B_Cb, C2_R_Cr,
 		SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
 		SDE_FETCH_LINEAR, 2),
 
-	PSEDUO_YUV_FMT(NV61,
+	PSEUDO_YUV_FMT(NV61,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C2_R_Cr, C1_B_Cb,
 		SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
@@ -348,7 +402,19 @@
 		false, 4, 0,
 		SDE_FETCH_UBWC, 2),
 
-	PSEDUO_YUV_FMT(NV12,
+	INTERLEAVED_RGB_FMT(RGBA1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_UBWC, 2),
+
+	INTERLEAVED_RGB_FMT(RGBX1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_UBWC, 2),
+
+	PSEUDO_YUV_FMT(NV12,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
 		C1_B_Cb, C2_R_Cr,
 		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
@@ -449,6 +515,8 @@
 
 	} else if (fmt->base.pixel_format == DRM_FORMAT_RGBA8888 ||
 		fmt->base.pixel_format == DRM_FORMAT_RGBX8888    ||
+		fmt->base.pixel_format == DRM_FORMAT_RGBA1010102 ||
+		fmt->base.pixel_format == DRM_FORMAT_RGBX1010102 ||
 		fmt->base.pixel_format == DRM_FORMAT_RGB565) {
 		uint32_t stride_alignment, aligned_bitstream_width;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 7c01596..6394f46 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -74,12 +74,22 @@
 #define DEFAULT_CREQ_LUT_NRT		0x0
 #define DEFAULT_PIXEL_RAM_SIZE		(50 * 1024)
 
+/* access property value based on prop_type and hardware index */
+#define PROP_VALUE_ACCESS(p, i, j)		((p + i)->value[j])
+
+/*
+ * access element within PROP_TYPE_BIT_OFFSET_ARRAYs based on prop_type,
+ * hardware index and offset array index
+ */
+#define PROP_BITVALUE_ACCESS(p, i, j, k)	((p + i)->bit_value[j][k])
+
 /*************************************************************
  *  DTSI PROPERTY INDEX
  *************************************************************/
 enum {
 	HW_OFF,
 	HW_LEN,
+	HW_PROP_MAX,
 };
 
 enum sde_prop {
@@ -91,9 +101,11 @@
 	WB_LINEWIDTH,
 	BANK_BIT,
 	QSEED_TYPE,
+	CSC_TYPE,
 	PANIC_PER_PIPE,
 	CDP,
 	SRC_SPLIT,
+	SDE_PROP_MAX,
 };
 
 enum {
@@ -107,8 +119,24 @@
 	SSPP_SAFE,
 	SSPP_MAX_RECTS,
 	SSPP_SCALE_SIZE,
-	SSPP_QSEED_OFF,
-	SSPP_CSC_OFF,
+	SSPP_VIG_BLOCKS,
+	SSPP_RGB_BLOCKS,
+	SSPP_PROP_MAX,
+};
+
+enum {
+	VIG_QSEED_OFF,
+	VIG_CSC_OFF,
+	VIG_HSIC_PROP,
+	VIG_MEMCOLOR_PROP,
+	VIG_PCC_PROP,
+	VIG_PROP_MAX,
+};
+
+enum {
+	RGB_SCALER_OFF,
+	RGB_PCC_PROP,
+	RGB_PROP_MAX,
 };
 
 enum {
@@ -116,6 +144,7 @@
 	INTF_LEN,
 	INTF_PREFETCH,
 	INTF_TYPE,
+	INTF_PROP_MAX,
 };
 
 enum {
@@ -127,25 +156,47 @@
 	TE2_LEN,
 	DSC_OFF,
 	DSC_LEN,
+	PP_SLAVE,
+	PP_PROP_MAX,
 };
 
 enum {
 	DSPP_OFF,
 	DSPP_SIZE,
-	DSPP_IGC,
-	DSPP_PCC,
-	DSPP_GC,
-	DSPP_PA,
-	DSPP_GAMUT,
-	DSPP_DITHER,
-	DSPP_HIST,
-	DSPP_AD,
+	DSPP_BLOCKS,
+	DSPP_PROP_MAX,
+};
+
+enum {
+	DSPP_IGC_PROP,
+	DSPP_PCC_PROP,
+	DSPP_GC_PROP,
+	DSPP_HSIC_PROP,
+	DSPP_MEMCOLOR_PROP,
+	DSPP_SIXZONE_PROP,
+	DSPP_GAMUT_PROP,
+	DSPP_DITHER_PROP,
+	DSPP_HIST_PROP,
+	DSPP_VLUT_PROP,
+	DSPP_BLOCKS_PROP_MAX,
+};
+
+enum {
+	AD_OFF,
+	AD_VERSION,
+	AD_PROP_MAX,
 };
 
 enum {
 	MIXER_OFF,
 	MIXER_LEN,
-	MIXER_GC,
+	MIXER_BLOCKS,
+	MIXER_PROP_MAX,
+};
+
+enum {
+	MIXER_GC_PROP,
+	MIXER_BLOCKS_PROP_MAX,
 };
 
 enum {
@@ -154,6 +205,7 @@
 	WB_ID,
 	WB_XIN_ID,
 	WB_CLK_CTRL,
+	WB_PROP_MAX,
 };
 
 enum {
@@ -164,6 +216,7 @@
 	VBIF_DEFAULT_OT_WR_LIMIT,
 	VBIF_DYNAMIC_OT_RD_LIMIT,
 	VBIF_DYNAMIC_OT_WR_LIMIT,
+	VBIF_PROP_MAX,
 };
 
 /*************************************************************
@@ -176,6 +229,7 @@
 	PROP_TYPE_STRING,
 	PROP_TYPE_STRING_ARRAY,
 	PROP_TYPE_BIT_OFFSET_ARRAY,
+	PROP_TYPE_NODE,
 };
 
 struct sde_prop_type {
@@ -192,6 +246,11 @@
 	enum prop_type type;
 };
 
+struct sde_prop_value {
+	u32 value[MAX_SDE_HW_BLK];
+	u32 bit_value[MAX_SDE_HW_BLK][MAX_BIT_OFFSET];
+};
+
 /*************************************************************
  * dts property list
  *************************************************************/
@@ -204,6 +263,7 @@
 	{WB_LINEWIDTH, "qcom,sde-wb-linewidth", false, PROP_TYPE_U32},
 	{BANK_BIT, "qcom,sde-highest-bank-bit", false, PROP_TYPE_U32},
 	{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
+	{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
 	{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
 	{CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
 	{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
@@ -222,8 +282,22 @@
 	{SSPP_SAFE, "qcom,sde-sspp-safe-lut", false, PROP_TYPE_U32_ARRAY},
 	{SSPP_MAX_RECTS, "qcom,sde-sspp-max-rects", false, PROP_TYPE_U32_ARRAY},
 	{SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
-	{SSPP_QSEED_OFF, "qcom,sde-sspp-qseed-off", false, PROP_TYPE_U32},
-	{SSPP_CSC_OFF, "qcom,sde-sspp-csc-off", false, PROP_TYPE_U32},
+	{SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
+	{SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type vig_prop[] = {
+	{VIG_QSEED_OFF, "qcom,sde-vig-qseed-off", false, PROP_TYPE_U32},
+	{VIG_CSC_OFF, "qcom,sde-vig-csc-off", false, PROP_TYPE_U32},
+	{VIG_HSIC_PROP, "qcom,sde-vig-hsic", false, PROP_TYPE_U32_ARRAY},
+	{VIG_MEMCOLOR_PROP, "qcom,sde-vig-memcolor", false,
+		PROP_TYPE_U32_ARRAY},
+	{VIG_PCC_PROP, "qcom,sde-vig-pcc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type rgb_prop[] = {
+	{RGB_SCALER_OFF, "qcom,sde-rgb-scaler-off", false, PROP_TYPE_U32},
+	{RGB_PCC_PROP, "qcom,sde-rgb-pcc", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type ctl_prop[] = {
@@ -234,20 +308,37 @@
 static struct sde_prop_type mixer_prop[] = {
 	{MIXER_OFF, "qcom,sde-mixer-off", true, PROP_TYPE_U32_ARRAY},
 	{MIXER_LEN, "qcom,sde-mixer-size", false, PROP_TYPE_U32},
-	{MIXER_GC, "qcom,sde-has-mixer-gc", false, PROP_TYPE_BOOL},
+	{MIXER_BLOCKS, "qcom,sde-mixer-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type mixer_blocks_prop[] = {
+	{MIXER_GC_PROP, "qcom,sde-mixer-gc", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type dspp_prop[] = {
 	{DSPP_OFF, "qcom,sde-dspp-off", true, PROP_TYPE_U32_ARRAY},
 	{DSPP_SIZE, "qcom,sde-dspp-size", false, PROP_TYPE_U32},
-	{DSPP_IGC, "qcom,sde-dspp-igc-off", false, PROP_TYPE_U32},
-	{DSPP_PCC, "qcom,sde-dspp-pcc-off", false, PROP_TYPE_U32},
-	{DSPP_GC, "qcom,sde-dspp-gc-off", false, PROP_TYPE_U32},
-	{DSPP_PA, "qcom,sde-dspp-pa-off", false, PROP_TYPE_U32},
-	{DSPP_GAMUT, "qcom,sde-dspp-gamut-off", false, PROP_TYPE_U32},
-	{DSPP_DITHER, "qcom,sde-dspp-dither-off", false, PROP_TYPE_U32},
-	{DSPP_HIST, "qcom,sde-dspp-hist-off", false, PROP_TYPE_U32},
-	{DSPP_AD, "qcom,sde-dspp-ad-off", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_BLOCKS, "qcom,sde-dspp-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type dspp_blocks_prop[] = {
+	{DSPP_IGC_PROP, "qcom,sde-dspp-igc", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_PCC_PROP, "qcom,sde-dspp-pcc", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_GC_PROP, "qcom,sde-dspp-gc", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_HSIC_PROP, "qcom,sde-dspp-hsic", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_MEMCOLOR_PROP, "qcom,sde-dspp-memcolor", false,
+		PROP_TYPE_U32_ARRAY},
+	{DSPP_SIXZONE_PROP, "qcom,sde-dspp-sixzone", false,
+		PROP_TYPE_U32_ARRAY},
+	{DSPP_GAMUT_PROP, "qcom,sde-dspp-gamut", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_DITHER_PROP, "qcom,sde-dspp-dither", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_HIST_PROP, "qcom,sde-dspp-hist", false, PROP_TYPE_U32_ARRAY},
+	{DSPP_VLUT_PROP, "qcom,sde-dspp-vlut", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type ad_prop[] = {
+	{AD_OFF, "qcom,sde-dspp-ad-off", false, PROP_TYPE_U32_ARRAY},
+	{AD_VERSION, "qcom,sde-dspp-ad-version", false, PROP_TYPE_U32},
 };
 
 static struct sde_prop_type pp_prop[] = {
@@ -259,6 +350,7 @@
 	{TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
 	{DSC_OFF, "qcom,sde-dsc-off", false, PROP_TYPE_U32_ARRAY},
 	{DSC_LEN, "qcom,sde-dsc-size", false, PROP_TYPE_U32},
+	{PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type cdm_prop[] = {
@@ -316,7 +408,7 @@
 }
 
 static int _parse_dt_bit_offset(struct device_node *np,
-	char *prop_name, u32 prop_value[][MAX_BIT_OFFSET],
+	char *prop_name, struct sde_prop_value *prop_value, u32 prop_index,
 	u32 count, bool mandatory)
 {
 	int rc = 0, len, i, j;
@@ -325,10 +417,13 @@
 	arr = of_get_property(np, prop_name, &len);
 	if (arr) {
 		len /= sizeof(u32);
+		len &= ~0x1;
 		for (i = 0, j = 0; i < len; j++) {
-			prop_value[j][0] = be32_to_cpu(arr[i]);
+			PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 0) =
+				be32_to_cpu(arr[i]);
 			i++;
-			prop_value[j][1] = be32_to_cpu(arr[i]);
+			PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 1) =
+				be32_to_cpu(arr[i]);
 			i++;
 		}
 	} else {
@@ -350,16 +445,24 @@
 	int *off_count)
 {
 	int rc = 0, i, val;
+	struct device_node *snp = NULL;
 
-	*off_count = of_property_count_u32_elems(np, sde_prop[0].prop_name);
-	if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) {
-		SDE_ERROR("invalid hw offset prop name:%s count:%d\n",
-			sde_prop[0].prop_name, *off_count);
-		*off_count = 0;
-		return sde_prop[0].is_mandatory ? -EINVAL : 0;
+	if (off_count) {
+		*off_count = of_property_count_u32_elems(np,
+				sde_prop[0].prop_name);
+		if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) {
+			if (sde_prop[0].is_mandatory) {
+				SDE_ERROR("invalid hw offset prop name:%s\"\
+					  count: %d\n",
+					sde_prop[0].prop_name, *off_count);
+				rc = -EINVAL;
+			}
+			*off_count = 0;
+			return rc;
+		}
 	}
 
-	for (i = 0; i < prop_size && i < MAX_BLOCKS; i++) {
+	for (i = 0; i < prop_size; i++) {
 		switch (sde_prop[i].type) {
 		case PROP_TYPE_U32:
 			rc = of_property_read_u32(np, sde_prop[i].prop_name,
@@ -368,15 +471,25 @@
 		case PROP_TYPE_U32_ARRAY:
 			prop_count[i] = of_property_count_u32_elems(np,
 				sde_prop[i].prop_name);
+			if (prop_count[i] < 0)
+				rc = prop_count[i];
 			break;
 		case PROP_TYPE_STRING_ARRAY:
 			prop_count[i] = of_property_count_strings(np,
 				sde_prop[i].prop_name);
+			if (prop_count[i] < 0)
+				rc = prop_count[i];
 			break;
 		case PROP_TYPE_BIT_OFFSET_ARRAY:
 			of_get_property(np, sde_prop[i].prop_name, &val);
 			prop_count[i] = val / (MAX_BIT_OFFSET * sizeof(u32));
 			break;
+		case PROP_TYPE_NODE:
+			snp = of_get_child_by_name(np,
+					sde_prop[i].prop_name);
+			if (!snp)
+				rc = -EINVAL;
+			break;
 		default:
 			SDE_DEBUG("invalid property type:%d\n",
 							sde_prop[i].type);
@@ -387,29 +500,44 @@
 			sde_prop[i].type, prop_count[i]);
 
 		if (rc && sde_prop[i].is_mandatory &&
-		   (sde_prop[i].type == PROP_TYPE_U32)) {
+		   ((sde_prop[i].type == PROP_TYPE_U32) ||
+		    (sde_prop[i].type == PROP_TYPE_NODE))) {
 			SDE_ERROR("prop:%s not present\n",
 						sde_prop[i].prop_name);
 			goto end;
 		} else if (sde_prop[i].type == PROP_TYPE_U32 ||
-			sde_prop[i].type == PROP_TYPE_BOOL) {
+			sde_prop[i].type == PROP_TYPE_BOOL ||
+			sde_prop[i].type == PROP_TYPE_NODE) {
 			rc = 0;
 			continue;
 		}
 
-		if ((prop_count[i] != *off_count) && sde_prop[i].is_mandatory) {
+		if (off_count && (prop_count[i] != *off_count) &&
+				sde_prop[i].is_mandatory) {
 			SDE_ERROR("prop:%s count:%d is different compared to \"\
 				offset array:%d\n", sde_prop[i].prop_name,
 				prop_count[i], *off_count);
 			rc = -EINVAL;
 			goto end;
-		} else if (prop_count[i] != *off_count) {
+		} else if (off_count && prop_count[i] != *off_count) {
 			SDE_DEBUG("prop:%s count:%d is different compared to \"\
 				offset array:%d\n", sde_prop[i].prop_name,
 				prop_count[i], *off_count);
 			rc = 0;
 			prop_count[i] = 0;
 		}
+		if (!off_count && prop_count[i] < 0) {
+			if (sde_prop[i].is_mandatory) {
+				SDE_ERROR("prop:%s count:%d is negative\n",
+					sde_prop[i].prop_name, prop_count[i]);
+				rc = -EINVAL;
+			} else {
+				rc = 0;
+				SDE_DEBUG("prop:%s count:%d is negative\n",
+					sde_prop[i].prop_name, prop_count[i]);
+			}
+			prop_count[i] = 0;
+		}
 	}
 
 end:
@@ -417,66 +545,86 @@
 }
 
 static int _read_dt_entry(struct device_node *np,
-	struct sde_prop_type *sde_prop, u32 prop_size, u32 *prop_count,
-	u32 prop_value[][MAX_SDE_HW_BLK],
-	u32 bit_value[][MAX_SDE_HW_BLK][MAX_BIT_OFFSET])
+	struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
+	bool *prop_exists,
+	struct sde_prop_value *prop_value)
 {
 	int rc = 0, i, j;
 
-	for (i = 0; i < prop_size && i < MAX_BLOCKS; i++) {
+	for (i = 0; i < prop_size; i++) {
+		prop_exists[i] = true;
 		switch (sde_prop[i].type) {
 		case PROP_TYPE_U32:
-			of_property_read_u32(np, sde_prop[i].prop_name,
-				&prop_value[i][0]);
+			rc = of_property_read_u32(np, sde_prop[i].prop_name,
+				&PROP_VALUE_ACCESS(prop_value, i, 0));
 			SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
 				 value:0x%x\n", i, sde_prop[i].prop_name,
-				sde_prop[i].type, prop_value[i][0]);
+				sde_prop[i].type,
+				PROP_VALUE_ACCESS(prop_value, i, 0));
+			if (rc)
+				prop_exists[i] = false;
 			break;
 		case PROP_TYPE_BOOL:
-			prop_value[i][0] =  of_property_read_bool(np,
-				sde_prop[i].prop_name);
+			PROP_VALUE_ACCESS(prop_value, i, 0) =
+				of_property_read_bool(np,
+					sde_prop[i].prop_name);
 			SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
 				value:0x%x\n", i, sde_prop[i].prop_name,
-				sde_prop[i].type, prop_value[i][0]);
+				sde_prop[i].type,
+				PROP_VALUE_ACCESS(prop_value, i, 0));
 			break;
 		case PROP_TYPE_U32_ARRAY:
 			rc = _parse_dt_u32_handler(np, sde_prop[i].prop_name,
-				prop_value[i], prop_count[i],
-				sde_prop[i].is_mandatory);
+				&PROP_VALUE_ACCESS(prop_value, i, 0),
+				prop_count[i], sde_prop[i].is_mandatory);
 			if (rc && sde_prop[i].is_mandatory) {
 				SDE_ERROR("%s prop validation success but \"\
 					read failed\n", sde_prop[i].prop_name);
+				prop_exists[i] = false;
 				goto end;
 			} else {
+				if (rc)
+					prop_exists[i] = false;
 				/* only for debug purpose */
 				SDE_DEBUG("prop id:%d prop name:%s prop \"\
 					type:%d", i, sde_prop[i].prop_name,
 					sde_prop[i].type);
 				for (j = 0; j < prop_count[i]; j++)
 					SDE_DEBUG(" value[%d]:0x%x ", j,
-							prop_value[i][j]);
+						PROP_VALUE_ACCESS(prop_value, i,
+								j));
 				SDE_DEBUG("\n");
 			}
 			break;
 		case PROP_TYPE_BIT_OFFSET_ARRAY:
 			rc = _parse_dt_bit_offset(np, sde_prop[i].prop_name,
-				bit_value[i], prop_count[i],
+				prop_value, i, prop_count[i],
 				sde_prop[i].is_mandatory);
 			if (rc && sde_prop[i].is_mandatory) {
 				SDE_ERROR("%s prop validation success but \"\
 					read failed\n", sde_prop[i].prop_name);
+				prop_exists[i] = false;
 				goto end;
 			} else {
+				if (rc)
+					prop_exists[i] = false;
 				SDE_DEBUG("prop id:%d prop name:%s prop \"\
 					type:%d", i, sde_prop[i].prop_name,
 					sde_prop[i].type);
 				for (j = 0; j < prop_count[i]; j++)
 					SDE_DEBUG(" count[%d]: bit:0x%x \"\
-					off:0x%x ", j, bit_value[i][j][0],
-					bit_value[i][j][1]);
+					off:0x%x\n", j,
+					PROP_BITVALUE_ACCESS(prop_value,
+						i, j, 0),
+					PROP_BITVALUE_ACCESS(prop_value,
+						i, j, 1));
 				SDE_DEBUG("\n");
 			}
 			break;
+		case PROP_TYPE_NODE:
+			/* Node will be parsed in calling function */
+			rc = 0;
+			break;
 		default:
 			SDE_DEBUG("invalid property type:%d\n",
 							sde_prop[i].type);
@@ -491,46 +639,77 @@
 
 static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	u32 prop_value[][MAX_SDE_HW_BLK], u32 *vig_count)
+	bool *prop_exists, struct sde_prop_value *prop_value, u32 *vig_count)
 {
-	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
-		set_bit(SDE_SSPP_SCALER_QSEED2, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
-		sblk->scaler_blk.base = prop_value[SSPP_QSEED_OFF][0];
-	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
-		set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
-		sblk->scaler_blk.base = prop_value[SSPP_QSEED_OFF][0];
-	}
-
-	set_bit(SDE_SSPP_CSC, &sspp->features);
-	sblk->csc_blk.base = prop_value[SSPP_CSC_OFF][0];
-	sblk->csc_blk.id = SDE_SSPP_CSC;
-
 	sblk->maxupscale = MAX_SSPP_UPSCALE;
 	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
 	sspp->id = SSPP_VIG0 + *vig_count;
 	sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
-
 	sblk->format_list = plane_formats_yuv;
 	set_bit(SDE_SSPP_QOS, &sspp->features);
 	(*vig_count)++;
+
+	if (!prop_value)
+		return;
+
+	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+		set_bit(SDE_SSPP_SCALER_QSEED2, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_QSEED_OFF, 0);
+	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+		set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_QSEED_OFF, 0);
+	}
+
+	sblk->csc_blk.id = SDE_SSPP_CSC;
+	if (sde_cfg->csc_type == SDE_SSPP_CSC) {
+		set_bit(SDE_SSPP_CSC, &sspp->features);
+		sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
+							VIG_CSC_OFF, 0);
+	} else if (sde_cfg->csc_type == SDE_SSPP_CSC_10BIT) {
+		set_bit(SDE_SSPP_CSC_10BIT, &sspp->features);
+		sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
+							VIG_CSC_OFF, 0);
+	}
+
+	sblk->hsic_blk.id = SDE_SSPP_HSIC;
+	if (prop_exists[VIG_HSIC_PROP]) {
+		sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_HSIC_PROP, 0);
+		sblk->hsic_blk.version = PROP_VALUE_ACCESS(prop_value,
+			VIG_HSIC_PROP, 1);
+		sblk->hsic_blk.len = 0;
+		set_bit(SDE_SSPP_HSIC, &sspp->features);
+	}
+
+	sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR;
+	if (prop_exists[VIG_MEMCOLOR_PROP]) {
+		sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_MEMCOLOR_PROP, 0);
+		sblk->memcolor_blk.version = PROP_VALUE_ACCESS(prop_value,
+			VIG_MEMCOLOR_PROP, 1);
+		sblk->memcolor_blk.len = 0;
+		set_bit(SDE_SSPP_MEMCOLOR, &sspp->features);
+	}
+
+	sblk->pcc_blk.id = SDE_SSPP_PCC;
+	if (prop_exists[VIG_PCC_PROP]) {
+		sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
+			VIG_PCC_PROP, 0);
+		sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
+			VIG_PCC_PROP, 1);
+		sblk->pcc_blk.len = 0;
+		set_bit(SDE_SSPP_PCC, &sspp->features);
+	}
 }
 
 static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	u32 prop_value[][MAX_SDE_HW_BLK], u32 *rgb_count)
+	bool *prop_exists, struct sde_prop_value *prop_value, u32 *rgb_count)
 {
-	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
-		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
-		sblk->scaler_blk.base = prop_value[SSPP_QSEED_OFF][0];
-	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
-		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
-		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
-		sblk->scaler_blk.base = prop_value[SSPP_QSEED_OFF][0];
-	}
-
 	sblk->maxupscale = MAX_SSPP_UPSCALE;
 	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
 	sspp->id = SSPP_RGB0 + *rgb_count;
@@ -538,11 +717,36 @@
 	sblk->format_list = plane_formats;
 	set_bit(SDE_SSPP_QOS, &sspp->features);
 	(*rgb_count)++;
+
+	if (!prop_value)
+		return;
+
+	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+			RGB_SCALER_OFF, 0);
+	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+		sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+			RGB_SCALER_OFF, 0);
+	}
+
+	sblk->pcc_blk.id = SDE_SSPP_PCC;
+	if (prop_exists[RGB_PCC_PROP]) {
+		sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
+			RGB_PCC_PROP, 0);
+		sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
+			RGB_PCC_PROP, 1);
+		sblk->pcc_blk.len = 0;
+		set_bit(SDE_SSPP_PCC, &sspp->features);
+	}
 }
 
 static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	u32 prop_value[][MAX_SDE_HW_BLK], u32 *cursor_count)
+	struct sde_prop_value *prop_value, u32 *cursor_count)
 {
 	set_bit(SDE_SSPP_CURSOR, &sspp->features);
 	sblk->maxupscale = SSPP_UNITY_SCALE;
@@ -555,7 +759,7 @@
 
 static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	u32 prop_value[][MAX_SDE_HW_BLK], u32 *dma_count)
+	struct sde_prop_value *prop_value, u32 *dma_count)
 {
 	sblk->maxupscale = SSPP_UNITY_SCALE;
 	sblk->maxdwnscale = SSPP_UNITY_SCALE;
@@ -569,14 +773,25 @@
 static int sde_sspp_parse_dt(struct device_node *np,
 	struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], off_count, i, j;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK];
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET];
+	int rc, prop_count[SSPP_PROP_MAX], off_count, i, j;
+	int vig_prop_count[VIG_PROP_MAX], rgb_prop_count[RGB_PROP_MAX];
+	bool prop_exists[SSPP_PROP_MAX], vig_prop_exists[VIG_PROP_MAX];
+	bool rgb_prop_exists[RGB_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	struct sde_prop_value *vig_prop_value = NULL, *rgb_prop_value = NULL;
 	const char *type;
 	struct sde_sspp_cfg *sspp;
 	struct sde_sspp_sub_blks *sblk;
 	u32 vig_count = 0, dma_count = 0, rgb_count = 0, cursor_count = 0;
 	u32 danger_count = 0, safe_count = 0;
+	struct device_node *snp = NULL;
+
+	prop_value = kzalloc(SSPP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
 
 	rc = _validate_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop),
 		prop_count, &off_count);
@@ -594,12 +809,49 @@
 		goto end;
 
 	rc = _read_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop), prop_count,
-							prop_value, bit_value);
+					prop_exists, prop_value);
 	if (rc)
 		goto end;
 
 	sde_cfg->sspp_count = off_count;
 
+	/* get vig feature dt properties if they exist */
+	snp = of_get_child_by_name(np, sspp_prop[SSPP_VIG_BLOCKS].prop_name);
+	if (snp) {
+		vig_prop_value = kzalloc(VIG_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+		if (!vig_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
+			vig_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
+				vig_prop_count, vig_prop_exists,
+				vig_prop_value);
+	}
+
+	/* get rgb feature dt properties if they exist */
+	snp = of_get_child_by_name(np, sspp_prop[SSPP_RGB_BLOCKS].prop_name);
+	if (snp) {
+		rgb_prop_value = kzalloc(RGB_PROP_MAX *
+					sizeof(struct sde_prop_value),
+					GFP_KERNEL);
+		if (!rgb_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
+			rgb_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
+				rgb_prop_count, rgb_prop_exists,
+				rgb_prop_value);
+	}
+
 	for (i = 0; i < off_count; i++) {
 		sspp = sde_cfg->sspp + i;
 		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
@@ -610,7 +862,7 @@
 		}
 		sspp->sblk = sblk;
 
-		sspp->base = prop_value[SSPP_OFF][i];
+		sspp->base = PROP_VALUE_ACCESS(prop_value, SSPP_OFF, i);
 		sblk->maxlinewidth = sde_cfg->max_sspp_linewidth;
 
 		set_bit(SDE_SSPP_SRC, &sspp->features);
@@ -619,16 +871,18 @@
 		of_property_read_string_index(np,
 				sspp_prop[SSPP_TYPE].prop_name, i, &type);
 		if (!strcmp(type, "vig")) {
-			_sde_sspp_setup_vig(sde_cfg, sspp, sblk, prop_value,
-								&vig_count);
+			_sde_sspp_setup_vig(sde_cfg, sspp, sblk,
+				vig_prop_exists, vig_prop_value, &vig_count);
 		} else if (!strcmp(type, "rgb")) {
-			_sde_sspp_setup_rgb(sde_cfg, sspp, sblk, prop_value,
-								&rgb_count);
+			_sde_sspp_setup_rgb(sde_cfg, sspp, sblk,
+				rgb_prop_exists, rgb_prop_value, &rgb_count);
 		} else if (!strcmp(type, "cursor")) {
-			_sde_sspp_setup_cursor(sde_cfg, sspp, sblk, prop_value,
+			/* No prop values for cursor pipes */
+			_sde_sspp_setup_cursor(sde_cfg, sspp, sblk, NULL,
 								&cursor_count);
 		} else if (!strcmp(type, "dma")) {
-			_sde_sspp_setup_dma(sde_cfg, sspp, sblk, prop_value,
+			/* No prop values for DMA pipes */
+			_sde_sspp_setup_dma(sde_cfg, sspp, sblk, NULL,
 								&dma_count);
 		} else {
 			SDE_ERROR("invalid sspp type:%s\n", type);
@@ -639,22 +893,30 @@
 		sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
 		sblk->maxvdeciexp = MAX_VERT_DECIMATION;
 
-		sspp->xin_id = prop_value[SSPP_XIN][i];
-		sblk->danger_lut_linear = prop_value[SSPP_DANGER][0];
-		sblk->danger_lut_tile = prop_value[SSPP_DANGER][1];
-		sblk->danger_lut_nrt = prop_value[SSPP_DANGER][2];
-		sblk->safe_lut_linear = prop_value[SSPP_SAFE][0];
-		sblk->safe_lut_tile = prop_value[SSPP_SAFE][1];
-		sblk->safe_lut_nrt = prop_value[SSPP_SAFE][2];
+		sspp->xin_id = PROP_VALUE_ACCESS(prop_value, SSPP_XIN, i);
+		sblk->danger_lut_linear =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 0);
+		sblk->danger_lut_tile =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 1);
+		sblk->danger_lut_nrt =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 2);
+		sblk->safe_lut_linear =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 0);
+		sblk->safe_lut_tile =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 1);
+		sblk->safe_lut_nrt =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 2);
 		sblk->creq_lut_nrt = DEFAULT_CREQ_LUT_NRT;
 		sblk->pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE;
-		sblk->src_blk.len = prop_value[SSPP_SIZE][0];
+		sblk->src_blk.len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
 
 		for (j = 0; j < sde_cfg->mdp_count; j++) {
 			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].reg_off =
-					bit_value[SSPP_CLK_CTRL][i][0];
+				PROP_BITVALUE_ACCESS(prop_value,
+						SSPP_CLK_CTRL, i, 0);
 			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].bit_off =
-					bit_value[SSPP_CLK_CTRL][i][1];
+				PROP_BITVALUE_ACCESS(prop_value,
+						SSPP_CLK_CTRL, i, 1);
 		}
 
 		SDE_DEBUG(
@@ -674,16 +936,18 @@
 	}
 
 end:
+	kfree(prop_value);
+	kfree(vig_prop_value);
+	kfree(rgb_prop_value);
 	return rc;
 }
 
 static int sde_ctl_parse_dt(struct device_node *np,
 		struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], i;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { {0} };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-				= { { { 0 } } };
+	int rc, prop_count[HW_PROP_MAX], i;
+	bool prop_exists[HW_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
 	struct sde_ctl_cfg *ctl;
 	u32 off_count;
 
@@ -693,6 +957,13 @@
 		goto end;
 	}
 
+	prop_value = kzalloc(HW_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
 		&off_count);
 	if (rc)
@@ -701,13 +972,13 @@
 	sde_cfg->ctl_count = off_count;
 
 	rc = _read_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
 	for (i = 0; i < off_count; i++) {
 		ctl = sde_cfg->ctl + i;
-		ctl->base = prop_value[HW_OFF][i];
+		ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
 		ctl->id = CTL_0 + i;
 
 		if (i < MAX_SPLIT_DISPLAY_CTL)
@@ -717,17 +988,18 @@
 	}
 
 end:
+	kfree(prop_value);
 	return rc;
 }
 
 static int sde_mixer_parse_dt(struct device_node *np,
 						struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], i;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-				= { { { 0 } } };
-
+	int rc, prop_count[MIXER_PROP_MAX], i;
+	int blocks_prop_count[MIXER_BLOCKS_PROP_MAX];
+	bool prop_exists[MIXER_PROP_MAX];
+	bool blocks_prop_exists[MIXER_BLOCKS_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
 	u32 off_count, max_blendstages;
 	u32 blend_reg_base[] = {0x20, 0x50, 0x80, 0xb0, 0x230, 0x260, 0x290};
 	u32 lm_pair_mask[] = {LM_1, LM_0, LM_5, 0x0, 0x0, LM_2};
@@ -735,6 +1007,7 @@
 	struct sde_lm_sub_blks *sblk;
 	int pp_count, dspp_count;
 	u32 pp_idx, dspp_idx;
+	struct device_node *snp = NULL;
 
 	if (!sde_cfg) {
 		SDE_ERROR("invalid argument input param\n");
@@ -743,6 +1016,13 @@
 	}
 	max_blendstages = sde_cfg->max_mixer_blendstages;
 
+	prop_value = kzalloc(MIXER_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop),
 		prop_count, &off_count);
 	if (rc)
@@ -751,13 +1031,33 @@
 	sde_cfg->mixer_count = off_count;
 
 	rc = _read_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
 	pp_count = sde_cfg->pingpong_count;
 	dspp_count = sde_cfg->dspp_count;
 
+	/* get mixer feature dt properties if they exist */
+	snp = of_get_child_by_name(np, mixer_prop[MIXER_BLOCKS].prop_name);
+	if (snp) {
+		blocks_prop_value = kzalloc(MIXER_BLOCKS_PROP_MAX *
+				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
+				GFP_KERNEL);
+		if (!blocks_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, mixer_blocks_prop,
+			ARRAY_SIZE(mixer_blocks_prop), blocks_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, mixer_blocks_prop,
+				ARRAY_SIZE(mixer_blocks_prop),
+				blocks_prop_count, blocks_prop_exists,
+				blocks_prop_value);
+	}
+
 	for (i = 0, pp_idx = 0, dspp_idx = 0; i < off_count; i++) {
 		mixer = sde_cfg->mixer + i;
 		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
@@ -768,10 +1068,10 @@
 		}
 		mixer->sblk = sblk;
 
-		mixer->base = prop_value[HW_OFF][i];
-		mixer->len = prop_value[HW_LEN][0];
+		mixer->base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i);
+		mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0);
 		mixer->id = LM_0 + i;
-		if (!mixer->len)
+		if (!prop_exists[MIXER_LEN])
 			mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
 
 		if (lm_pair_mask[i])
@@ -784,8 +1084,6 @@
 			ARRAY_SIZE(blend_reg_base), max_blendstages)));
 		if (sde_cfg->has_src_split)
 			set_bit(SDE_MIXER_SOURCESPLIT, &mixer->features);
-		if (prop_value[MIXER_GC][0])
-			set_bit(SDE_MIXER_GC, &mixer->features);
 
 		if ((i < ROT_LM_OFFSET) || (i >= LINE_LM_OFFSET)) {
 			mixer->pingpong = pp_count > 0 ? pp_idx + PINGPONG_0
@@ -800,19 +1098,30 @@
 			mixer->pingpong = PINGPONG_MAX;
 			mixer->dspp = DSPP_MAX;
 		}
+
+		sblk->gc.id = SDE_MIXER_GC;
+		if (blocks_prop_value && blocks_prop_exists[MIXER_GC_PROP]) {
+			sblk->gc.base = PROP_VALUE_ACCESS(blocks_prop_value,
+					MIXER_GC_PROP, 0);
+			sblk->gc.version = PROP_VALUE_ACCESS(blocks_prop_value,
+					MIXER_GC_PROP, 1);
+			sblk->gc.len = 0;
+			set_bit(SDE_MIXER_GC, &mixer->features);
+		}
 	}
 
 end:
+	kfree(prop_value);
+	kfree(blocks_prop_value);
 	return rc;
 }
 
 static int sde_intf_parse_dt(struct device_node *np,
 						struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], i;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-					= { { { 0 } } };
+	int rc, prop_count[INTF_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[INTF_PROP_MAX];
 	u32 off_count;
 	u32 dsi_count = 0, none_count = 0, hdmi_count = 0, dp_count = 0;
 	const char *type;
@@ -824,6 +1133,13 @@
 		goto end;
 	}
 
+	prop_value = kzalloc(INTF_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop),
 		prop_count, &off_count);
 	if (rc)
@@ -832,20 +1148,20 @@
 	sde_cfg->intf_count = off_count;
 
 	rc = _read_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
 	for (i = 0; i < off_count; i++) {
 		intf = sde_cfg->intf + i;
-		intf->base = prop_value[INTF_OFF][i];
-		intf->len = prop_value[INTF_LEN][0];
+		intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, i);
+		intf->len = PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0);
 		intf->id = INTF_0 + i;
-		if (!intf->len)
+		if (!prop_exists[INTF_LEN])
 			intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
 
 		intf->prog_fetch_lines_worst_case =
-					prop_value[INTF_PREFETCH][i];
+				PROP_VALUE_ACCESS(prop_value, INTF_PREFETCH, i);
 
 		of_property_read_string_index(np,
 				intf_prop[INTF_TYPE].prop_name, i, &type);
@@ -869,15 +1185,15 @@
 	}
 
 end:
+	kfree(prop_value);
 	return rc;
 }
 
 static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], i, j;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-					= { { { 0 } } };
+	int rc, prop_count[WB_PROP_MAX], i, j;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[WB_PROP_MAX];
 	u32 off_count;
 	struct sde_wb_cfg *wb;
 	struct sde_wb_sub_blocks *sblk;
@@ -888,6 +1204,13 @@
 		goto end;
 	}
 
+	prop_value = kzalloc(WB_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
 		&off_count);
 	if (rc)
@@ -896,7 +1219,7 @@
 	sde_cfg->wb_count = off_count;
 
 	rc = _read_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
@@ -910,14 +1233,15 @@
 		}
 		wb->sblk = sblk;
 
-		wb->base = prop_value[WB_OFF][i];
-		wb->id = WB_0 + prop_value[WB_ID][i];
-		wb->clk_ctrl = SDE_CLK_CTRL_WB0 + prop_value[WB_ID][i];
-		wb->xin_id = prop_value[WB_XIN_ID][i];
+		wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i);
+		wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i);
+		wb->clk_ctrl = SDE_CLK_CTRL_WB0 +
+			PROP_VALUE_ACCESS(prop_value, WB_ID, i);
+		wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
 		wb->vbif_idx = VBIF_NRT;
-		wb->len = prop_value[WB_LEN][0];
+		wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0);
 		wb->format_list = wb2_formats;
-		if (!wb->len)
+		if (!prop_exists[WB_LEN])
 			wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
 		sblk->maxlinewidth = sde_cfg->max_wb_linewidth;
 
@@ -930,9 +1254,11 @@
 
 		for (j = 0; j < sde_cfg->mdp_count; j++) {
 			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
-					bit_value[WB_CLK_CTRL][i][0];
+				PROP_BITVALUE_ACCESS(prop_value,
+						WB_CLK_CTRL, i, 0);
 			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].bit_off =
-					bit_value[WB_CLK_CTRL][i][1];
+				PROP_BITVALUE_ACCESS(prop_value,
+						WB_CLK_CTRL, i, 1);
 		}
 
 		SDE_DEBUG(
@@ -946,19 +1272,128 @@
 	}
 
 end:
+	kfree(prop_value);
 	return rc;
 }
 
+static void _sde_dspp_setup_blocks(struct sde_mdss_cfg *sde_cfg,
+	struct sde_dspp_cfg *dspp, struct sde_dspp_sub_blks *sblk,
+	bool *prop_exists, struct sde_prop_value *prop_value)
+{
+	sblk->igc.id = SDE_DSPP_IGC;
+	if (prop_exists[DSPP_IGC_PROP]) {
+		sblk->igc.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_IGC_PROP, 0);
+		sblk->igc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_IGC_PROP, 1);
+		sblk->igc.len = 0;
+		set_bit(SDE_DSPP_IGC, &dspp->features);
+	}
+
+	sblk->pcc.id = SDE_DSPP_PCC;
+	if (prop_exists[DSPP_PCC_PROP]) {
+		sblk->pcc.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_PCC_PROP, 0);
+		sblk->pcc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_PCC_PROP, 1);
+		sblk->pcc.len = 0;
+		set_bit(SDE_DSPP_PCC, &dspp->features);
+	}
+
+	sblk->gc.id = SDE_DSPP_GC;
+	if (prop_exists[DSPP_GC_PROP]) {
+		sblk->gc.base = PROP_VALUE_ACCESS(prop_value, DSPP_GC_PROP, 0);
+		sblk->gc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GC_PROP, 1);
+		sblk->gc.len = 0;
+		set_bit(SDE_DSPP_GC, &dspp->features);
+	}
+
+	sblk->gamut.id = SDE_DSPP_GAMUT;
+	if (prop_exists[DSPP_GAMUT_PROP]) {
+		sblk->gamut.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GAMUT_PROP, 0);
+		sblk->gamut.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GAMUT_PROP, 1);
+		sblk->gamut.len = 0;
+		set_bit(SDE_DSPP_GAMUT, &dspp->features);
+	}
+
+	sblk->dither.id = SDE_DSPP_DITHER;
+	if (prop_exists[DSPP_DITHER_PROP]) {
+		sblk->dither.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_DITHER_PROP, 0);
+		sblk->dither.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_DITHER_PROP, 1);
+		sblk->dither.len = 0;
+		set_bit(SDE_DSPP_DITHER, &dspp->features);
+	}
+
+	sblk->hist.id = SDE_DSPP_HIST;
+	if (prop_exists[DSPP_HIST_PROP]) {
+		sblk->hist.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HIST_PROP, 0);
+		sblk->hist.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HIST_PROP, 1);
+		sblk->hist.len = 0;
+		set_bit(SDE_DSPP_HIST, &dspp->features);
+	}
+
+	sblk->hsic.id = SDE_DSPP_HSIC;
+	if (prop_exists[DSPP_HSIC_PROP]) {
+		sblk->hsic.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HSIC_PROP, 0);
+		sblk->hsic.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HSIC_PROP, 1);
+		sblk->hsic.len = 0;
+		set_bit(SDE_DSPP_HSIC, &dspp->features);
+	}
+
+	sblk->memcolor.id = SDE_DSPP_MEMCOLOR;
+	if (prop_exists[DSPP_MEMCOLOR_PROP]) {
+		sblk->memcolor.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_MEMCOLOR_PROP, 0);
+		sblk->memcolor.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_MEMCOLOR_PROP, 1);
+		sblk->memcolor.len = 0;
+		set_bit(SDE_DSPP_MEMCOLOR, &dspp->features);
+	}
+
+	sblk->sixzone.id = SDE_DSPP_SIXZONE;
+	if (prop_exists[DSPP_SIXZONE_PROP]) {
+		sblk->sixzone.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_SIXZONE_PROP, 0);
+		sblk->sixzone.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_SIXZONE_PROP, 1);
+		sblk->sixzone.len = 0;
+		set_bit(SDE_DSPP_SIXZONE, &dspp->features);
+	}
+
+	sblk->vlut.id = SDE_DSPP_VLUT;
+	if (prop_exists[DSPP_VLUT_PROP]) {
+		sblk->vlut.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_VLUT_PROP, 0);
+		sblk->vlut.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_VLUT_PROP, 1);
+		sblk->vlut.len = 0;
+		set_bit(SDE_DSPP_VLUT, &dspp->features);
+	}
+}
+
 static int sde_dspp_parse_dt(struct device_node *np,
 						struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], i;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-				= { { { 0 } } };
-	u32 off_count;
+	int rc, prop_count[DSPP_PROP_MAX], i;
+	int ad_prop_count[AD_PROP_MAX];
+	bool prop_exists[DSPP_PROP_MAX], ad_prop_exists[AD_PROP_MAX];
+	bool blocks_prop_exists[DSPP_BLOCKS_PROP_MAX];
+	struct sde_prop_value *ad_prop_value = NULL;
+	int blocks_prop_count[DSPP_BLOCKS_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
+	u32 off_count, ad_off_count;
 	struct sde_dspp_cfg *dspp;
 	struct sde_dspp_sub_blks *sblk;
+	struct device_node *snp = NULL;
 
 	if (!sde_cfg) {
 		SDE_ERROR("invalid argument\n");
@@ -966,6 +1401,13 @@
 		goto end;
 	}
 
+	prop_value = kzalloc(DSPP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop),
 		prop_count, &off_count);
 	if (rc)
@@ -974,13 +1416,50 @@
 	sde_cfg->dspp_count = off_count;
 
 	rc = _read_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
+	/* Parse AD dtsi entries */
+	ad_prop_value = kzalloc(AD_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!ad_prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	rc = _validate_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop),
+		ad_prop_count, &ad_off_count);
+	if (rc)
+		goto end;
+	rc = _read_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop), ad_prop_count,
+		ad_prop_exists, ad_prop_value);
+	if (rc)
+		goto end;
+
+	/* get DSPP feature dt properties if they exist */
+	snp = of_get_child_by_name(np, dspp_prop[DSPP_BLOCKS].prop_name);
+	if (snp) {
+		blocks_prop_value = kzalloc(DSPP_BLOCKS_PROP_MAX *
+				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
+				GFP_KERNEL);
+		if (!blocks_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, dspp_blocks_prop,
+			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, dspp_blocks_prop,
+			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count,
+			blocks_prop_exists, blocks_prop_value);
+		if (rc)
+			goto end;
+	}
+
 	for (i = 0; i < off_count; i++) {
 		dspp = sde_cfg->dspp + i;
-		dspp->base = prop_value[DSPP_OFF][i];
+		dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i);
 		dspp->id = DSPP_0 + i;
 
 		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
@@ -991,46 +1470,34 @@
 		}
 		dspp->sblk = sblk;
 
-		sblk->igc.base = prop_value[DSPP_IGC][0];
-		if (sblk->igc.base)
-			set_bit(SDE_DSPP_IGC, &dspp->features);
+		if (blocks_prop_value)
+			_sde_dspp_setup_blocks(sde_cfg, dspp, sblk,
+					blocks_prop_exists, blocks_prop_value);
 
-		sblk->pcc.base = prop_value[DSPP_PCC][0];
-		if (sblk->pcc.base)
-			set_bit(SDE_DSPP_PCC, &dspp->features);
-
-		sblk->gc.base = prop_value[DSPP_GC][0];
-		if (sblk->gc.base)
-			set_bit(SDE_DSPP_GC, &dspp->features);
-
-		sblk->gamut.base = prop_value[DSPP_GAMUT][0];
-		if (sblk->gamut.base)
-			set_bit(SDE_DSPP_GAMUT, &dspp->features);
-
-		sblk->dither.base = prop_value[DSPP_DITHER][0];
-		if (sblk->dither.base)
-			set_bit(SDE_DSPP_DITHER, &dspp->features);
-
-		sblk->hist.base = prop_value[DSPP_HIST][0];
-		if (sblk->hist.base)
-			set_bit(SDE_DSPP_HIST, &dspp->features);
-
-		sblk->ad.base = prop_value[DSPP_AD][i];
-		if (sblk->ad.base)
+		sblk->ad.id = SDE_DSPP_AD;
+		if (ad_prop_value && (i < ad_off_count) &&
+		    ad_prop_exists[AD_OFF]) {
+			sblk->ad.base = PROP_VALUE_ACCESS(ad_prop_value,
+				AD_OFF, i);
+			sblk->ad.version = PROP_VALUE_ACCESS(ad_prop_value,
+				AD_VERSION, 0);
 			set_bit(SDE_DSPP_AD, &dspp->features);
+		}
 	}
 
 end:
+	kfree(prop_value);
+	kfree(ad_prop_value);
+	kfree(blocks_prop_value);
 	return rc;
 }
 
 static int sde_cdm_parse_dt(struct device_node *np,
 				struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], i;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-				= { { { 0 } } };
+	int rc, prop_count[HW_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[HW_PROP_MAX];
 	u32 off_count;
 	struct sde_cdm_cfg *cdm;
 
@@ -1040,6 +1507,13 @@
 		goto end;
 	}
 
+	prop_value = kzalloc(HW_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
 		&off_count);
 	if (rc)
@@ -1048,15 +1522,15 @@
 	sde_cfg->cdm_count = off_count;
 
 	rc = _read_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
 	for (i = 0; i < off_count; i++) {
 		cdm = sde_cfg->cdm + i;
-		cdm->base = prop_value[HW_OFF][i];
+		cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
 		cdm->id = CDM_0 + i;
-		cdm->len = prop_value[HW_LEN][0];
+		cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
 
 		/* intf3 and wb2 for cdm block */
 		cdm->wb_connect = sde_cfg->wb_count ? BIT(WB_2) : BIT(31);
@@ -1064,16 +1538,16 @@
 	}
 
 end:
+	kfree(prop_value);
 	return rc;
 }
 
 static int sde_vbif_parse_dt(struct device_node *np,
 				struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], i, j, k;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-				= { { { 0 } } };
+	int rc, prop_count[VBIF_PROP_MAX], i, j, k;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[VBIF_PROP_MAX];
 	u32 off_count, vbif_len, rd_len = 0, wr_len = 0;
 	struct sde_vbif_cfg *vbif;
 
@@ -1083,6 +1557,13 @@
 		goto end;
 	}
 
+	prop_value = kzalloc(VBIF_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop),
 			prop_count, &off_count);
 	if (rc)
@@ -1101,31 +1582,31 @@
 	sde_cfg->vbif_count = off_count;
 
 	rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
-	vbif_len = prop_value[VBIF_LEN][0];
-	if (!vbif_len)
+	vbif_len = PROP_VALUE_ACCESS(prop_value, VBIF_LEN, 0);
+	if (!prop_exists[VBIF_LEN])
 		vbif_len = DEFAULT_SDE_HW_BLOCK_LEN;
 
 	for (i = 0; i < off_count; i++) {
 		vbif = sde_cfg->vbif + i;
-		vbif->base = prop_value[VBIF_OFF][i];
+		vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i);
 		vbif->len = vbif_len;
-		vbif->id = VBIF_0 + prop_value[VBIF_ID][i];
+		vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i);
 
 		SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0);
 
 		vbif->xin_halt_timeout = VBIF_XIN_HALT_TIMEOUT;
 
-		vbif->default_ot_rd_limit =
-				prop_value[VBIF_DEFAULT_OT_RD_LIMIT][0];
+		vbif->default_ot_rd_limit = PROP_VALUE_ACCESS(prop_value,
+				VBIF_DEFAULT_OT_RD_LIMIT, 0);
 		SDE_DEBUG("default_ot_rd_limit=%u\n",
 				vbif->default_ot_rd_limit);
 
-		vbif->default_ot_wr_limit =
-				prop_value[VBIF_DEFAULT_OT_WR_LIMIT][0];
+		vbif->default_ot_wr_limit = PROP_VALUE_ACCESS(prop_value,
+				VBIF_DEFAULT_OT_WR_LIMIT, 0);
 		SDE_DEBUG("default_ot_wr_limit=%u\n",
 				vbif->default_ot_wr_limit);
 
@@ -1146,9 +1627,11 @@
 
 		for (j = 0, k = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
 			vbif->dynamic_ot_rd_tbl.cfg[j].pps = (u64)
-				prop_value[VBIF_DYNAMIC_OT_RD_LIMIT][k++];
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_RD_LIMIT, k++);
 			vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit =
-				prop_value[VBIF_DYNAMIC_OT_RD_LIMIT][k++];
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_RD_LIMIT, k++);
 			SDE_DEBUG("dynamic_ot_rd_tbl[%d].cfg=<%llu %u>\n", j,
 				vbif->dynamic_ot_rd_tbl.cfg[j].pps,
 				vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit);
@@ -1171,9 +1654,11 @@
 
 		for (j = 0, k = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
 			vbif->dynamic_ot_wr_tbl.cfg[j].pps = (u64)
-				prop_value[VBIF_DYNAMIC_OT_WR_LIMIT][k++];
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_WR_LIMIT, k++);
 			vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit =
-				prop_value[VBIF_DYNAMIC_OT_WR_LIMIT][k++];
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_WR_LIMIT, k++);
 			SDE_DEBUG("dynamic_ot_wr_tbl[%d].cfg=<%llu %u>\n", j,
 				vbif->dynamic_ot_wr_tbl.cfg[j].pps,
 				vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit);
@@ -1186,15 +1671,15 @@
 	}
 
 end:
+	kfree(prop_value);
 	return rc;
 }
 
 static int sde_pp_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[MAX_BLOCKS], i;
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-				= { { { 0 } } };
+	int rc, prop_count[PP_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[PP_PROP_MAX];
 	u32 off_count;
 	struct sde_pingpong_cfg *pp;
 	struct sde_pingpong_sub_blks *sblk;
@@ -1205,6 +1690,13 @@
 		goto end;
 	}
 
+	prop_value = kzalloc(PP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
 		&off_count);
 	if (rc)
@@ -1213,7 +1705,7 @@
 	sde_cfg->pingpong_count = off_count;
 
 	rc = _read_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
@@ -1227,22 +1719,25 @@
 		}
 		pp->sblk = sblk;
 
-		pp->base = prop_value[PP_OFF][i];
+		pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i);
 		pp->id = PINGPONG_0 + i;
-		pp->len = prop_value[PP_LEN][0];
+		pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);
 
-		sblk->te.base = prop_value[TE_OFF][i];
+		sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
 		sblk->te.id = SDE_PINGPONG_TE;
 		set_bit(SDE_PINGPONG_TE, &pp->features);
 
-		sblk->te2.base = prop_value[TE2_OFF][i];
+		sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i);
 		if (sblk->te2.base) {
 			sblk->te2.id = SDE_PINGPONG_TE2;
 			set_bit(SDE_PINGPONG_TE2, &pp->features);
 			set_bit(SDE_PINGPONG_SPLIT, &pp->features);
 		}
 
-		sblk->dsc.base = prop_value[DSC_OFF][i];
+		if (PROP_VALUE_ACCESS(prop_value, PP_SLAVE, i))
+			set_bit(SDE_PINGPONG_SLAVE, &pp->features);
+
+		sblk->dsc.base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
 		if (sblk->dsc.base) {
 			sblk->dsc.id = SDE_PINGPONG_DSC;
 			set_bit(SDE_PINGPONG_DSC, &pp->features);
@@ -1250,15 +1745,15 @@
 	}
 
 end:
+	kfree(prop_value);
 	return rc;
 }
 
 static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
 {
-	int rc, len, prop_count[MAX_BLOCKS];
-	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { {0} };
-	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
-			= { { { 0 } } };
+	int rc, len, prop_count[SDE_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[SDE_PROP_MAX];
 	const char *type;
 
 	if (!cfg) {
@@ -1267,13 +1762,20 @@
 		goto end;
 	}
 
+	prop_value = kzalloc(SDE_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
 	rc = _validate_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
 		&len);
 	if (rc)
 		goto end;
 
 	rc = _read_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
-		prop_value, bit_value);
+		prop_exists, prop_value);
 	if (rc)
 		goto end;
 
@@ -1283,29 +1785,33 @@
 
 	cfg->mdp_count = 1;
 	cfg->mdp[0].id = MDP_TOP;
-	cfg->mdp[0].base = prop_value[SDE_OFF][0];
-	cfg->mdp[0].len = prop_value[SDE_LEN][0];
-	if (!cfg->mdp[0].len)
+	cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0);
+	cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0);
+	if (!prop_exists[SDE_LEN])
 		cfg->mdp[0].len = DEFAULT_SDE_HW_BLOCK_LEN;
 
-	cfg->max_sspp_linewidth = prop_value[SSPP_LINEWIDTH][0];
-	if (!cfg->max_sspp_linewidth)
+	cfg->max_sspp_linewidth = PROP_VALUE_ACCESS(prop_value,
+			SSPP_LINEWIDTH, 0);
+	if (!prop_exists[SSPP_LINEWIDTH])
 		cfg->max_sspp_linewidth = DEFAULT_SDE_LINE_WIDTH;
 
-	cfg->max_mixer_width = prop_value[MIXER_LINEWIDTH][0];
-	if (!cfg->max_mixer_width)
+	cfg->max_mixer_width = PROP_VALUE_ACCESS(prop_value,
+			MIXER_LINEWIDTH, 0);
+	if (!prop_exists[MIXER_LINEWIDTH])
 		cfg->max_mixer_width = DEFAULT_SDE_LINE_WIDTH;
 
-	cfg->max_mixer_blendstages = prop_value[MIXER_BLEND][0];
-	if (!cfg->max_mixer_blendstages)
+	cfg->max_mixer_blendstages = PROP_VALUE_ACCESS(prop_value,
+			MIXER_BLEND, 0);
+	if (!prop_exists[MIXER_BLEND])
 		cfg->max_mixer_blendstages = DEFAULT_SDE_MIXER_BLENDSTAGES;
 
-	cfg->max_wb_linewidth = prop_value[WB_LINEWIDTH][0];
-	if (!cfg->max_wb_linewidth)
+	cfg->max_wb_linewidth = PROP_VALUE_ACCESS(prop_value, WB_LINEWIDTH, 0);
+	if (!prop_exists[WB_LINEWIDTH])
 		cfg->max_wb_linewidth = DEFAULT_SDE_LINE_WIDTH;
 
-	cfg->mdp[0].highest_bank_bit = prop_value[BANK_BIT][0];
-	if (!cfg->mdp[0].highest_bank_bit)
+	cfg->mdp[0].highest_bank_bit = PROP_VALUE_ACCESS(prop_value,
+			BANK_BIT, 0);
+	if (!prop_exists[BANK_BIT])
 		cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
 
 	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
@@ -1314,8 +1820,15 @@
 	else if (!rc && !strcmp(type, "qseedv2"))
 		cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
 
-	cfg->has_src_split = prop_value[SRC_SPLIT][0];
+	rc = of_property_read_string(np, sde_prop[CSC_TYPE].prop_name, &type);
+	if (!rc && !strcmp(type, "csc"))
+		cfg->csc_type = SDE_SSPP_CSC;
+	else if (!rc && !strcmp(type, "csc-10bit"))
+		cfg->csc_type = SDE_SSPP_CSC_10BIT;
+
+	cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
 end:
+	kfree(prop_value);
 	return rc;
 }
 
@@ -1334,7 +1847,7 @@
 	}
 }
 
-static void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
+void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 {
 	int i;
 
@@ -1360,6 +1873,7 @@
 		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
 		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
 	}
+	kfree(sde_cfg);
 }
 
 /*************************************************************
@@ -1425,6 +1939,5 @@
 
 end:
 	sde_hw_catalog_deinit(sde_cfg);
-	kfree(sde_cfg);
 	return NULL;
 }
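
Note on the catalog refactor above: the parsers now make one heap allocation of struct sde_prop_value per property group and index it through PROP_VALUE_ACCESS()/PROP_BITVALUE_ACCESS(), replacing the old fixed [MAX_BLOCKS][MAX_SDE_HW_BLK] stack arrays. The struct and macro definitions are not part of this hunk; the sketch below is an assumed layout, shown only to illustrate how the flat kzalloc()'d array is addressed.

/*
 * Sketch only: the real definitions live earlier in sde_hw_catalog.c.
 * Sizes and field names here are assumptions for illustration.
 */
#define MAX_SDE_HW_BLK	5	/* assumed per-property instance cap */
#define MAX_BIT_OFFSET	2	/* assumed <reg_off, bit_off> pair */

struct sde_prop_value {
	u32 value[MAX_SDE_HW_BLK];
	u32 bit_value[MAX_SDE_HW_BLK][MAX_BIT_OFFSET];
};

/* p: kzalloc()'d base, i: property index, j: HW instance, k: bit entry */
#define PROP_VALUE_ACCESS(p, i, j)		((p + i)->value[j])
#define PROP_BITVALUE_ACCESS(p, i, j, k)	((p + i)->bit_value[j][k])
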
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 911fbe2..7282f75 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -85,6 +85,7 @@
  * @SDE_SSPP_SCALER_QSEED3,  QSEED3 algorithm support
  * @SDE_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
  * @SDE_SSPP_CSC,            Support of Color space conversion
+ * @SDE_SSPP_CSC_10BIT,      Support of 10-bit Color space conversion
  * @SDE_SSPP_HSIC,           Global HSIC control
  * @SDE_SSPP_MEMCOLOR        Memory Color Support
  * @SDE_SSPP_IGC,            Inverse gamma correction
@@ -99,6 +100,7 @@
 	SDE_SSPP_SCALER_QSEED3,
 	SDE_SSPP_SCALER_RGB,
 	SDE_SSPP_CSC,
+	SDE_SSPP_CSC_10BIT,
 	SDE_SSPP_HSIC,
 	SDE_SSPP_MEMCOLOR,
 	SDE_SSPP_IGC,
@@ -157,6 +159,7 @@
  * @SDE_PINGPONG_TE         Tear check block
  * @SDE_PINGPONG_TE2        Additional tear check block for split pipes
  * @SDE_PINGPONG_SPLIT      PP block supports split fifo
+ * @SDE_PINGPONG_SLAVE      PP block is a suitable slave for split fifo
  * @SDE_PINGPONG_DSC,       Display stream compression blocks
  * @SDE_PINGPONG_MAX
  */
@@ -164,6 +167,7 @@
 	SDE_PINGPONG_TE = 0x1,
 	SDE_PINGPONG_TE2,
 	SDE_PINGPONG_SPLIT,
+	SDE_PINGPONG_SLAVE,
 	SDE_PINGPONG_DSC,
 	SDE_PINGPONG_MAX
 };
@@ -262,9 +266,11 @@
 /**
  * struct sde_scaler_blk: Scaler information
  * @info:   HW register and features supported by this sub-blk
+ * @version: qseed block revision
  */
 struct sde_scaler_blk {
 	SDE_HW_SUBBLK_INFO;
+	u32 version;
 };
 
 struct sde_csc_blk {
@@ -335,8 +341,8 @@
 	struct sde_src_blk src_blk;
 	struct sde_scaler_blk scaler_blk;
 	struct sde_pp_blk csc_blk;
-	struct sde_pp_blk hsic;
-	struct sde_pp_blk memcolor;
+	struct sde_pp_blk hsic_blk;
+	struct sde_pp_blk memcolor_blk;
 	struct sde_pp_blk pcc_blk;
 	struct sde_pp_blk igc_blk;
 
@@ -603,6 +609,7 @@
  * @max_wb_linewidth   max writeback line width support.
  * @highest_bank_bit   highest memory bit setting for tile buffers.
  * @qseed_type         qseed2 or qseed3 support.
+ * @csc_type           csc or csc_10bit support.
  * @has_src_split      source split feature status
  * @has_cdp            Client driver prefetch feature status
  */
@@ -615,6 +622,7 @@
 	u32 max_wb_linewidth;
 	u32 highest_bank_bit;
 	u32 qseed_type;
+	u32 csc_type;
 	bool has_src_split;
 	bool has_cdp;
 
@@ -677,7 +685,7 @@
 #define BLK_AD(s) ((s)->ad)
 
 /**
- * sde_hw_catalog_init() - sde hardware catalog init API parses dtsi property
+ * sde_hw_catalog_init - sde hardware catalog init API parses dtsi property
  * and stores all parsed offset, hardware capabilities in config structure.
  * @dev:          drm device node.
  * @hw_rev:       caller needs provide the hardware revision before parsing.
@@ -686,4 +694,10 @@
  */
 struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);
 
+/**
+ * sde_hw_catalog_deinit - sde hardware catalog cleanup
+ * @sde_cfg:      pointer returned from init function
+ */
+void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg);
+
 #endif /* _SDE_HW_CATALOG_H */
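
With sde_hw_catalog_deinit() now exported and also freeing the catalog structure itself (the kfree(sde_cfg) moved out of the init error path above), callers are expected to release the catalog through it rather than kfree()-ing the pointer returned by sde_hw_catalog_init(). A caller-side sketch; the example_* function names are placeholders, not symbols from this patch.

static struct sde_mdss_cfg *catalog;

/* illustrative probe-time setup; error handling trimmed */
static int example_catalog_setup(struct drm_device *dev, u32 hw_rev)
{
	catalog = sde_hw_catalog_init(dev, hw_rev);
	if (!catalog)
		return -ENOMEM;
	return 0;
}

static void example_catalog_teardown(void)
{
	/* frees sub-block tables, OT limit tables and the catalog itself */
	sde_hw_catalog_deinit(catalog);
	catalog = NULL;
}
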
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
index 240c81c..298ebf7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
@@ -20,6 +20,8 @@
 	{DRM_FORMAT_BGRA8888, 0},
 	{DRM_FORMAT_XRGB8888, 0},
 	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
 	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_RGB888, 0},
 	{DRM_FORMAT_BGR888, 0},
@@ -49,9 +51,11 @@
 	{DRM_FORMAT_ARGB8888, 0},
 	{DRM_FORMAT_ABGR8888, 0},
 	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
 	{DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_BGRA8888, 0},
 	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
 	{DRM_FORMAT_RGBX8888, 0},
 	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_RGB888, 0},
@@ -111,6 +115,7 @@
 	{DRM_FORMAT_ABGR8888, 0},
 	{DRM_FORMAT_BGRA8888, 0},
 	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
 	{DRM_FORMAT_ABGR1555, 0},
 	{DRM_FORMAT_BGRA5551, 0},
 	{DRM_FORMAT_XBGR1555, 0},
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
new file mode 100644
index 0000000..a30e1a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_COLOR_PROCESSING_H
+#define _SDE_HW_COLOR_PROCESSING_H
+
+#include "sde_hw_color_processing_v1_7.h"
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
new file mode 100644
index 0000000..f1f66f3
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -0,0 +1,453 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_color_processing_v1_7.h"
+
+#define PA_HUE_VIG_OFF		0x110
+#define PA_SAT_VIG_OFF		0x114
+#define PA_VAL_VIG_OFF		0x118
+#define PA_CONT_VIG_OFF		0x11C
+
+#define PA_HUE_DSPP_OFF		0x238
+#define PA_SAT_DSPP_OFF		0x23C
+#define PA_VAL_DSPP_OFF		0x240
+#define PA_CONT_DSPP_OFF	0x244
+
+#define PA_LUTV_DSPP_OFF	0x1400
+#define PA_LUT_SWAP_OFF		0x234
+
+#define PA_HUE_MASK		0xFFF
+#define PA_SAT_MASK		0xFFFF
+#define PA_VAL_MASK		0xFF
+#define PA_CONT_MASK		0xFF
+
+#define MEMCOL_PWL0_OFF		0x88
+#define MEMCOL_PWL0_MASK	0xFFFF07FF
+#define MEMCOL_PWL1_OFF		0x8C
+#define MEMCOL_PWL1_MASK	0xFFFFFFFF
+#define MEMCOL_HUE_REGION_OFF	0x90
+#define MEMCOL_HUE_REGION_MASK	0x7FF07FF
+#define MEMCOL_SAT_REGION_OFF	0x94
+#define MEMCOL_SAT_REGION_MASK	0xFFFFFF
+#define MEMCOL_VAL_REGION_OFF	0x98
+#define MEMCOL_VAL_REGION_MASK	0xFFFFFF
+#define MEMCOL_P0_LEN		0x14
+#define MEMCOL_P1_LEN		0x8
+#define MEMCOL_PWL2_OFF		0x218
+#define MEMCOL_PWL2_MASK	0xFFFFFFFF
+#define MEMCOL_BLEND_GAIN_OFF	0x21C
+#define MEMCOL_PWL_HOLD_OFF	0x214
+
+#define VIG_OP_PA_EN		BIT(4)
+#define VIG_OP_PA_SKIN_EN	BIT(5)
+#define VIG_OP_PA_FOL_EN	BIT(6)
+#define VIG_OP_PA_SKY_EN	BIT(7)
+#define VIG_OP_PA_HUE_EN	BIT(25)
+#define VIG_OP_PA_SAT_EN	BIT(26)
+#define VIG_OP_PA_VAL_EN	BIT(27)
+#define VIG_OP_PA_CONT_EN	BIT(28)
+
+#define DSPP_OP_SZ_VAL_EN	BIT(31)
+#define DSPP_OP_SZ_SAT_EN	BIT(30)
+#define DSPP_OP_SZ_HUE_EN	BIT(29)
+#define DSPP_OP_PA_HUE_EN	BIT(25)
+#define DSPP_OP_PA_SAT_EN	BIT(26)
+#define DSPP_OP_PA_VAL_EN	BIT(27)
+#define DSPP_OP_PA_CONT_EN	BIT(28)
+#define DSPP_OP_PA_EN		BIT(20)
+#define DSPP_OP_PA_LUTV_EN	BIT(19)
+#define DSPP_OP_PA_SKIN_EN	BIT(5)
+#define DSPP_OP_PA_FOL_EN	BIT(6)
+#define DSPP_OP_PA_SKY_EN	BIT(7)
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+
+#define PA_VIG_DISABLE_REQUIRED(x) \
+			!((x) & (VIG_OP_PA_SKIN_EN | VIG_OP_PA_SKY_EN | \
+			VIG_OP_PA_FOL_EN | VIG_OP_PA_HUE_EN | \
+			VIG_OP_PA_SAT_EN | VIG_OP_PA_VAL_EN | \
+			VIG_OP_PA_CONT_EN))
+
+
+#define PA_DSPP_DISABLE_REQUIRED(x) \
+			!((x) & (DSPP_OP_PA_SKIN_EN | DSPP_OP_PA_SKY_EN | \
+			DSPP_OP_PA_FOL_EN | DSPP_OP_PA_HUE_EN | \
+			DSPP_OP_PA_SAT_EN | DSPP_OP_PA_VAL_EN | \
+			DSPP_OP_PA_CONT_EN | DSPP_OP_PA_LUTV_EN))
+
+#define DSPP_OP_PCC_ENABLE	BIT(0)
+#define PCC_OP_MODE_OFF		0
+#define PCC_CONST_COEFF_OFF	4
+#define PCC_R_COEFF_OFF		0x10
+#define PCC_G_COEFF_OFF		0x1C
+#define PCC_B_COEFF_OFF		0x28
+#define PCC_RG_COEFF_OFF	0x34
+#define PCC_RB_COEFF_OFF	0x40
+#define PCC_GB_COEFF_OFF	0x4C
+#define PCC_RGB_COEFF_OFF	0x58
+#define PCC_CONST_COEFF_MASK	0xFFFF
+#define PCC_COEFF_MASK		0x3FFFF
+
+#define SSPP	0
+#define DSPP	1
+
+static void __setup_pa_hue(struct sde_hw_blk_reg_map *hw,
+			const struct sde_pp_blk *blk, uint32_t hue,
+			int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_HUE_DSPP_OFF : PA_HUE_VIG_OFF;
+	u32 op_hue_en = (location == DSPP) ? DSPP_OP_PA_HUE_EN :
+					VIG_OP_PA_HUE_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, hue & PA_HUE_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!hue) {
+		opmode &= ~op_hue_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_hue_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	uint32_t hue = *((uint32_t *)cfg);
+
+	__setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic_blk, hue, SSPP);
+}
+
+void sde_setup_dspp_pa_hue_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	uint32_t hue = *((uint32_t *)cfg);
+
+	__setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic, hue, DSPP);
+}
+
+static void __setup_pa_sat(struct sde_hw_blk_reg_map *hw,
+			const struct sde_pp_blk *blk, uint32_t sat,
+			int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_SAT_DSPP_OFF : PA_SAT_VIG_OFF;
+	u32 op_sat_en = (location == DSPP) ?
+			DSPP_OP_PA_SAT_EN : VIG_OP_PA_SAT_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, sat & PA_SAT_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!sat) {
+		opmode &= ~op_sat_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_sat_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	uint32_t sat = *((uint32_t *)cfg);
+
+	__setup_pa_sat(&ctx->hw, &ctx->cap->sblk->hsic_blk, sat, SSPP);
+}
+
+static void __setup_pa_val(struct sde_hw_blk_reg_map *hw,
+			const struct sde_pp_blk *blk, uint32_t value,
+			int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_VAL_DSPP_OFF : PA_VAL_VIG_OFF;
+	u32 op_val_en = (location == DSPP) ?
+			DSPP_OP_PA_VAL_EN : VIG_OP_PA_VAL_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, value & PA_VAL_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!value) {
+		opmode &= ~op_val_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_val_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	uint32_t value = *((uint32_t *)cfg);
+
+	__setup_pa_val(&ctx->hw, &ctx->cap->sblk->hsic_blk, value, SSPP);
+}
+
+static void __setup_pa_cont(struct sde_hw_blk_reg_map *hw,
+			const struct sde_pp_blk *blk, uint32_t contrast,
+			int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_CONT_DSPP_OFF : PA_CONT_VIG_OFF;
+	u32 op_cont_en = (location == DSPP) ? DSPP_OP_PA_CONT_EN :
+					VIG_OP_PA_CONT_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, contrast & PA_CONT_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!contrast) {
+		opmode &= ~op_cont_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_cont_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	uint32_t contrast = *((uint32_t *)cfg);
+
+	__setup_pa_cont(&ctx->hw, &ctx->cap->sblk->hsic_blk, contrast, SSPP);
+}
+
+void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
+				   enum sde_memcolor_type type,
+				   void *cfg)
+{
+	struct drm_msm_memcol *mc = cfg;
+	u32 base = ctx->cap->sblk->memcolor_blk.base;
+	u32 off, op, mc_en, hold = 0;
+	u32 mc_i = 0;
+
+	switch (type) {
+	case MEMCOLOR_SKIN:
+		mc_en = VIG_OP_PA_SKIN_EN;
+		mc_i = 0;
+		break;
+	case MEMCOLOR_SKY:
+		mc_en = VIG_OP_PA_SKY_EN;
+		mc_i = 1;
+		break;
+	case MEMCOLOR_FOLIAGE:
+		mc_en = VIG_OP_PA_FOL_EN;
+		mc_i = 2;
+		break;
+	default:
+		DRM_ERROR("Invalid memory color type %d\n", type);
+		return;
+	}
+
+	op = SDE_REG_READ(&ctx->hw, base);
+	if (!mc) {
+		op &= ~mc_en;
+		if (PA_VIG_DISABLE_REQUIRED(op))
+			op &= ~VIG_OP_PA_EN;
+		SDE_REG_WRITE(&ctx->hw, base, op);
+		return;
+	}
+
+	off = base + (mc_i * MEMCOL_P0_LEN);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL0_OFF),
+		      mc->color_adjust_p0 & MEMCOL_PWL0_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL1_OFF),
+		      mc->color_adjust_p1 & MEMCOL_PWL1_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_HUE_REGION_OFF),
+		      mc->hue_region & MEMCOL_HUE_REGION_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_SAT_REGION_OFF),
+		      mc->sat_region & MEMCOL_SAT_REGION_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_VAL_REGION_OFF),
+		      mc->val_region & MEMCOL_VAL_REGION_MASK);
+
+	off = base + (mc_i * MEMCOL_P1_LEN);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL2_OFF),
+		      mc->color_adjust_p2 & MEMCOL_PWL2_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_BLEND_GAIN_OFF), mc->blend_gain);
+
+	hold = SDE_REG_READ(&ctx->hw, off + MEMCOL_PWL_HOLD_OFF);
+	hold &= ~(0xF << (mc_i * 4));
+	hold |= ((mc->sat_hold & 0x3) << (mc_i * 4));
+	hold |= ((mc->val_hold & 0x3) << ((mc_i * 4) + 2));
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL_HOLD_OFF), hold);
+
+	op |= VIG_OP_PA_EN | mc_en;
+	SDE_REG_WRITE(&ctx->hw, base, op);
+}
+
+void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	struct drm_msm_pcc *pcc;
+	void  __iomem *base;
+
+	if (!hw_cfg || (hw_cfg->len != sizeof(*pcc) && hw_cfg->payload)) {
+		DRM_ERROR("invalid params hw %pK payload %pK payloadsize %d "
+			  "exp size %zd\n",
+			   hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			   ((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
+		return;
+	}
+	base = ctx->hw.base_off + ctx->cap->base;
+
+	/* Turn off feature */
+	if (!hw_cfg->payload) {
+		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base,
+			      PCC_OP_MODE_OFF);
+		return;
+	}
+	DRM_DEBUG_DRIVER("Enable PCC feature\n");
+	pcc = hw_cfg->payload;
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF,
+				  pcc->r.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 4,
+		      pcc->g.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 8,
+		      pcc->b.c & PCC_CONST_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF,
+				  pcc->r.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 4,
+				  pcc->g.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 8,
+				  pcc->b.r & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF,
+				  pcc->r.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 4,
+				  pcc->g.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 8,
+				  pcc->b.g & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF,
+				  pcc->r.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 4,
+				  pcc->g.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 8,
+				  pcc->b.b & PCC_COEFF_MASK);
+
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF,
+				  pcc->r.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 4,
+				  pcc->g.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 8,
+				  pcc->b.rg & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF,
+				  pcc->r.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 4,
+				  pcc->g.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 8,
+				  pcc->b.rb & PCC_COEFF_MASK);
+
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF,
+				  pcc->r.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 4,
+				  pcc->g.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 8,
+				  pcc->b.gb & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF,
+				  pcc->r.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 4,
+		      pcc->g.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 8,
+		      pcc->b.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, DSPP_OP_PCC_ENABLE);
+}
+
+void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct drm_msm_pa_vlut *payload = NULL;
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	u32 base = ctx->cap->sblk->vlut.base;
+	u32 offset = base + PA_LUTV_DSPP_OFF;
+	u32 op_mode, tmp;
+	int i = 0, j = 0;
+
+	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
+			sizeof(struct drm_msm_pa_vlut))) {
+		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
+			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			  ((hw_cfg) ? hw_cfg->len : 0),
+			  sizeof(struct drm_msm_pa_vlut));
+		return;
+	}
+	op_mode = SDE_REG_READ(&ctx->hw, base);
+	if (!hw_cfg->payload) {
+		DRM_DEBUG_DRIVER("Disable vlut feature\n");
+		/**
+		 * In the PA_VLUT disable case, remove PA_VLUT enable bit(19)
+		 * first, then check whether any other PA sub-features are
+		 * enabled or not. If none of the sub-features are enabled,
+		 * remove the PA global enable bit(20).
+		 */
+		op_mode &= ~((u32)DSPP_OP_PA_LUTV_EN);
+		if (PA_DSPP_DISABLE_REQUIRED(op_mode))
+			op_mode &= ~((u32)DSPP_OP_PA_EN);
+		SDE_REG_WRITE(&ctx->hw, base, op_mode);
+		return;
+	}
+	payload = hw_cfg->payload;
+	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
+	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
+		tmp = (payload->val[i] & REG_MASK(10)) |
+			((payload->val[i + 1] & REG_MASK(10)) << 16);
+		SDE_REG_WRITE(&ctx->hw, (offset + j),
+			     tmp);
+	}
+	SDE_REG_WRITE(&ctx->hw, (base + PA_LUT_SWAP_OFF), 1);
+	op_mode |= DSPP_OP_PA_EN | DSPP_OP_PA_LUTV_EN;
+	SDE_REG_WRITE(&ctx->hw, base, op_mode);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
new file mode 100644
index 0000000..0f9bc0e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_COLOR_PROCESSING_V1_7_H
+#define _SDE_HW_COLOR_PROCESSING_V1_7_H
+
+#include "sde_hw_sspp.h"
+#include "sde_hw_dspp.h"
+
+/**
+ * sde_setup_pipe_pa_hue_v1_7 - setup SSPP hue feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to hue data
+ */
+void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_sat_v1_7 - setup SSPP saturation feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to saturation data
+ */
+void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_val_v1_7 - setup SSPP value feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to value data
+ */
+void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_cont_v1_7 - setup SSPP contrast feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to contrast data
+ */
+void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_memcol_v1_7 - setup SSPP memory color in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @type: Memory color type (Skin, sky, or foliage)
+ * @cfg: Pointer to memory color config data
+ */
+void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
+				   enum sde_memcolor_type type,
+				   void *cfg);
+
+/**
+ * sde_setup_dspp_pcc_v1_7 - setup DSPP PCC feature in v1.7 hardware
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to PCC data
+ */
+void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_hue_v1_7 - setup DSPP hue feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to hue data
+ */
+void sde_setup_dspp_pa_hue_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_vlut_v1_7 - setup DSPP PA vLUT feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to vLUT data
+ */
+void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+#endif
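
The v1.7 DSPP hooks declared above take the generic void *cfg already used by the DSPP ops; for PCC and vLUT this is expected to be a struct sde_hw_cp_cfg whose payload/len describe the drm_msm_* blob, with a NULL payload disabling the feature. A hedged caller-side sketch, assuming the sde_hw_dspp handle exposes its ops table as wired up in _setup_dspp_ops(); the wrapper itself is illustrative.

#include <drm/msm_drm_pp.h>

/* illustrative helper; only the ops hook and sde_hw_cp_cfg fields come
 * from this patch */
static void example_dspp_apply_pcc(struct sde_hw_dspp *dspp,
				   struct drm_msm_pcc *pcc)
{
	struct sde_hw_cp_cfg hw_cfg = {
		.payload = pcc,		/* NULL payload turns PCC back off */
		.len = pcc ? sizeof(*pcc) : 0,
	};

	if (dspp->ops.setup_pcc)
		dspp->ops.setup_pcc(dspp, &hw_cfg);
}
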
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 2f1bac7..56d9f2a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,8 @@
 	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
 #define   CTL_LAYER_EXT(lm)             \
 	(0x40 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT2(lm)             \
+	(0x70 + (((lm) - LM_0) * 0x004))
 #define   CTL_TOP                       0x014
 #define   CTL_FLUSH                     0x018
 #define   CTL_START                     0x01C
@@ -127,6 +129,12 @@
 	case SSPP_DMA1:
 		flushbits = BIT(12);
 		break;
+	case SSPP_DMA2:
+		flushbits = BIT(24);
+		break;
+	case SSPP_DMA3:
+		flushbits = BIT(25);
+		break;
 	case SSPP_CURSOR0:
 		flushbits = BIT(22);
 		break;
@@ -258,11 +266,22 @@
 	return -EINVAL;
 }
 
+static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int i;
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		SDE_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+	}
+}
+
 static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
 	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
-	u32 mixercfg, mixercfg_ext, mix, ext;
+	u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
 	int i, j;
 	u8 stages;
 	int pipes_per_stage;
@@ -282,6 +301,7 @@
 
 	mixercfg = BIT(24); /* always set BORDER_OUT */
 	mixercfg_ext = 0;
+	mixercfg_ext2 = 0;
 
 	for (i = 0; i <= stages; i++) {
 		/* overflow to ext register if 'i + 1 > 7' */
@@ -330,6 +350,14 @@
 				mixercfg |= mix << 21;
 				mixercfg_ext |= ext << 18;
 				break;
+			case SSPP_DMA2:
+				mix = (i + 1) & 0xf;
+				mixercfg_ext2 |= mix << 0;
+				break;
+			case SSPP_DMA3:
+				mix = (i + 1) & 0xf;
+				mixercfg_ext2 |= mix << 4;
+				break;
 			case SSPP_CURSOR0:
 				mixercfg_ext |= ((i + 1) & 0xF) << 20;
 				break;
@@ -344,6 +372,7 @@
 
 	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
 	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
 }
 
 static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
@@ -389,6 +418,7 @@
 	ops->trigger_start = sde_hw_ctl_trigger_start;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
+	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
 	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
 	ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
 	ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
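
CTL_LAYER_EXT2 packs the new DMA2/DMA3 stage selections as 4-bit fields at bits [3:0] and [7:4], mirroring how the base and EXT registers encode the other pipes. A small worked sketch of that encoding, as an illustration only; the helper is not part of this patch.

/* illustrative encoding helper; the register layout is taken from the
 * sde_hw_ctl_setup_blendstage() hunk above (stage i programs value i + 1) */
static u32 example_ext2_bits(int dma2_stage, int dma3_stage)
{
	u32 mixercfg_ext2 = 0;

	if (dma2_stage >= 0)
		mixercfg_ext2 |= ((dma2_stage + 1) & 0xf) << 0;
	if (dma3_stage >= 0)
		mixercfg_ext2 |= ((dma3_stage + 1) & 0xf) << 4;

	/* e.g. DMA2 at stage 2, DMA3 unused -> EXT2 = 0x3 */
	return mixercfg_ext2;
}
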
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 2f9ff5b..2fb7b37 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -125,6 +125,18 @@
 		u32 *flushbits,
 		enum sde_wb blk);
 
+	/**
+	 * Set all blend stages to disabled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_all_blendstages)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Configure layer mixer to pipe configuration
+	 * @ctx       : ctl path ctx pointer
+	 * @lm        : layer mixer enumeration
+	 * @cfg       : blend stage configuration
+	 */
 	void (*setup_blendstage)(struct sde_hw_ctl *ctx,
 		enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index);
 };
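
clear_all_blendstages() gives CTL clients a way to drop every stale pipe-to-stage mapping before a new stage configuration is programmed. A hedged usage sketch, assuming the sde_hw_ctl handle carries its ops table as elsewhere in this driver; the wrapper and its call site are assumptions.

/* illustrative reprogram path; only the two ops come from this patch */
static void example_ctl_reprogram(struct sde_hw_ctl *ctl, enum sde_lm lm,
				  struct sde_hw_stage_cfg *stage_cfg)
{
	/* wipe CTL_LAYER and CTL_LAYER_EXT for every mixer owned by this CTL */
	if (ctl->ops.clear_all_blendstages)
		ctl->ops.clear_all_blendstages(ctl);

	/* then apply the new pipe-to-stage mapping for the target mixer */
	if (ctl->ops.setup_blendstage)
		ctl->ops.setup_blendstage(ctl, lm, stage_cfg, 0);
}
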
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 82ca83f..66c03f0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -9,46 +9,12 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-#include "drm/msm_drm_pp.h"
+#include <drm/msm_drm_pp.h>
 #include "sde_hw_mdss.h"
 #include "sde_hwio.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_dspp.h"
-
-#define PCC_ENABLE BIT(0)
-#define PCC_OP_MODE_OFF 0
-#define PCC_CONST_COEFF_OFF 4
-#define PCC_R_COEFF_OFF 0x10
-#define PCC_G_COEFF_OFF 0x1C
-#define PCC_B_COEFF_OFF 0x28
-#define PCC_RG_COEFF_OFF 0x34
-#define PCC_RB_COEFF_OFF 0x40
-#define PCC_GB_COEFF_OFF 0x4C
-#define PCC_RGB_COEFF_OFF 0x58
-#define PCC_CONST_COEFF_MASK 0xFFFF
-#define PCC_COEFF_MASK 0x3FFFF
-
-#define REG_MASK(n) ((BIT(n)) - 1)
-#define PA_SZ_VAL_MASK   BIT(31)
-#define PA_SZ_SAT_MASK   BIT(30)
-#define PA_SZ_HUE_MASK   BIT(29)
-#define PA_CONT_MASK     BIT(28)
-#define PA_VAL_MASK      BIT(27)
-#define PA_SAT_MASK      BIT(26)
-#define PA_HUE_MASK      BIT(25)
-#define PA_LUTV_MASK     BIT(19)
-#define PA_HIST_MASK     BIT(16)
-#define PA_MEM_SKY_MASK  BIT(7)
-#define PA_MEM_FOL_MASK  BIT(6)
-#define PA_MEM_SKIN_MASK BIT(5)
-#define PA_ENABLE        BIT(20)
-
-#define PA_ENABLE_MASK (PA_SZ_VAL_MASK | PA_SZ_SAT_MASK | PA_SZ_HUE_MASK \
-			| PA_CONT_MASK | PA_VAL_MASK | PA_SAT_MASK \
-			| PA_HUE_MASK | PA_LUTV_MASK | PA_HIST_MASK \
-			| PA_MEM_SKY_MASK | PA_MEM_FOL_MASK | PA_MEM_SKIN_MASK)
-
-#define PA_LUT_SWAP_OFF 0x234
+#include "sde_hw_color_processing.h"
 
 static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
 		struct sde_mdss_cfg *m,
@@ -82,157 +48,10 @@
 {
 }
 
-void sde_dspp_setup_pa(struct sde_hw_dspp *dspp, void *cfg)
-{
-}
-
-void sde_dspp_setup_hue(struct sde_hw_dspp *dspp, void *cfg)
-{
-}
-
-void sde_dspp_setup_vlut(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct drm_msm_pa_vlut *payload = NULL;
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	u32 op_mode, tmp;
-	int i = 0, j = 0;
-
-	if (!hw_cfg  || (hw_cfg->payload && hw_cfg->len !=
-			sizeof(struct drm_msm_pa_vlut))) {
-		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
-			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
-			  ((hw_cfg) ? hw_cfg->len : 0),
-			  sizeof(struct drm_msm_pa_vlut));
-		return;
-	}
-	op_mode = SDE_REG_READ(&ctx->hw, 0);
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable vlut feature\n");
-		/**
-		 * In the PA_VLUT disable case, remove PA_VLUT enable bit(19)
-		 * first, then check whether any other PA sub-features are
-		 * enabled or not. If none of the sub-features are enabled,
-		 * remove the PA global enable bit(20).
-		 */
-		op_mode &= ~((u32)PA_LUTV_MASK);
-		if (!(op_mode & PA_ENABLE_MASK))
-			op_mode &= ~((u32)PA_ENABLE);
-		SDE_REG_WRITE(&ctx->hw, 0, op_mode);
-		return;
-	}
-	payload = hw_cfg->payload;
-	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
-	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
-		tmp = (payload->val[i] & REG_MASK(10)) |
-			((payload->val[i + 1] & REG_MASK(10)) << 16);
-		SDE_REG_WRITE(&ctx->hw, (ctx->cap->sblk->vlut.base + j),
-			     tmp);
-	}
-	SDE_REG_WRITE(&ctx->hw, PA_LUT_SWAP_OFF, 1);
-	op_mode |= PA_ENABLE | PA_LUTV_MASK;
-	SDE_REG_WRITE(&ctx->hw, 0, op_mode);
-}
-
-void sde_dspp_setup_pcc(struct sde_hw_dspp *ctx, void *cfg)
-{
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct drm_msm_pcc *pcc;
-	void  __iomem *base;
-
-	if (!hw_cfg  || (hw_cfg->len != sizeof(*pcc)  && hw_cfg->payload)) {
-		DRM_ERROR(
-			"hw_cfg %pK payload %pK payload size %d exp size %zd\n",
-			hw_cfg, (hw_cfg ? hw_cfg->payload : NULL),
-			((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
-		return;
-	}
-	base = ctx->hw.base_off + ctx->cap->base;
-
-	/* Turn off feature */
-	if (!hw_cfg->payload) {
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base,
-			      PCC_OP_MODE_OFF);
-		return;
-	}
-	DRM_DEBUG_DRIVER("Enable PCC feature\n");
-	pcc = hw_cfg->payload;
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF,
-				  pcc->r.c & PCC_CONST_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw,
-		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 4,
-		      pcc->g.c & PCC_CONST_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw,
-		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 8,
-		      pcc->b.c & PCC_CONST_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF,
-				  pcc->r.r & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 4,
-				  pcc->g.r & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 8,
-				  pcc->b.r & PCC_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF,
-				  pcc->r.g & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 4,
-				  pcc->g.g & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 8,
-				  pcc->b.g & PCC_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF,
-				  pcc->r.b & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 4,
-				  pcc->g.b & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 8,
-				  pcc->b.b & PCC_COEFF_MASK);
-
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF,
-				  pcc->r.rg & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 4,
-				  pcc->g.rg & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 8,
-				  pcc->b.rg & PCC_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF,
-				  pcc->r.rb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 4,
-				  pcc->g.rb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 8,
-				  pcc->b.rb & PCC_COEFF_MASK);
-
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF,
-				  pcc->r.gb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 4,
-				  pcc->g.gb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 8,
-				  pcc->b.gb & PCC_COEFF_MASK);
-
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF,
-				  pcc->r.rgb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw,
-		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 4,
-		      pcc->g.rgb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw,
-		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 8,
-		      pcc->b.rgb & PCC_COEFF_MASK);
-	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, PCC_ENABLE);
-}
-
 void sde_dspp_setup_sharpening(struct sde_hw_dspp *ctx, void *cfg)
 {
 }
 
-void sde_dspp_setup_pa_memcolor(struct sde_hw_dspp *ctx, void *cfg)
-{
-}
-
-void sde_dspp_setup_sixzone(struct sde_hw_dspp *dspp)
-{
-}
-
 void sde_dspp_setup_danger_safe(struct sde_hw_dspp *ctx, void *cfg)
 {
 }
@@ -251,23 +70,22 @@
 		switch (i) {
 		case SDE_DSPP_PCC:
 			if (c->cap->sblk->pcc.version ==
-				(SDE_COLOR_PROCESS_VER(0x1, 0x0)))
-				c->ops.setup_pcc = sde_dspp_setup_pcc;
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+				c->ops.setup_pcc = sde_setup_dspp_pcc_v1_7;
 			break;
 		case SDE_DSPP_HSIC:
 			if (c->cap->sblk->hsic.version ==
-				(SDE_COLOR_PROCESS_VER(0x1, 0x0)))
-				c->ops.setup_hue = sde_dspp_setup_hue;
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+				c->ops.setup_hue = sde_setup_dspp_pa_hue_v1_7;
 			break;
 		case SDE_DSPP_VLUT:
 			if (c->cap->sblk->vlut.version ==
-				(SDE_COLOR_PROCESS_VER(0x1, 0x0))) {
-				c->ops.setup_vlut = sde_dspp_setup_vlut;
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+				c->ops.setup_vlut = sde_setup_dspp_pa_vlut_v1_7;
 			}
 		default:
 			break;
 		}
-
 	}
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 6ffc4b6..25e1f3b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -36,11 +36,11 @@
 	void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg);
 
 	/**
-	 * update_igc - update dspp igc
+	 * setup_igc - update dspp igc
 	 * @ctx: Pointer to dspp context
 	 * @cfg: Pointer to configuration
 	 */
-	void (*update_igc)(struct sde_hw_dspp *ctx, void *cfg);
+	void (*setup_igc)(struct sde_hw_dspp *ctx, void *cfg);
 
 	/**
 	 * setup_pa - setup dspp pa
@@ -75,7 +75,7 @@
 	 * @ctx: Pointer to dspp context
 	 * @cfg: Pointer to configuration
 	 */
-	void (*setup_sixzone)(struct sde_hw_dspp *dspp);
+	void (*setup_sixzone)(struct sde_hw_dspp *dspp, void *cfg);
 
 	/**
 	 * setup_danger_safe - setup danger safe LUTS
@@ -125,6 +125,20 @@
 	 * @cfg: Pointer to configuration
 	 */
 	void (*setup_vlut)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_gc - update dspp gc
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_gc)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_gamut - update dspp gamut
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_gamut)(struct sde_hw_dspp *ctx, void *cfg);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index f408194..f0fc8f6 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -62,6 +62,15 @@
 #define   INTF_FRAME_COUNT              0x0AC
 #define   INTF_LINE_COUNT               0x0B0
 
+#define INTF_MISR_CTRL			0x180
+#define INTF_MISR_SIGNATURE		0x184
+
+#define MISR_FRAME_COUNT_MASK		0xFF
+#define MISR_CTRL_ENABLE		BIT(8)
+#define MISR_CTRL_STATUS		BIT(9)
+#define MISR_CTRL_STATUS_CLEAR		BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK	BIT(31)
+
 static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -239,6 +248,50 @@
 	}
 }
 
+static void sde_hw_intf_set_misr(struct sde_hw_intf *intf,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	u32 config = 0;
+
+	if (!misr_map)
+		return;
+
+	SDE_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* Clear data */
+	wmb();
+
+	if (misr_map->enable) {
+		config = (MISR_FRAME_COUNT_MASK & 1) |
+			(MISR_CTRL_ENABLE);
+
+		SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
+	} else {
+		SDE_REG_WRITE(c, INTF_MISR_CTRL, 0);
+	}
+}
+
+static void sde_hw_intf_collect_misr(struct sde_hw_intf *intf,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+
+	if (!misr_map)
+		return;
+
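+	/* store one signature per frame, bounded by frame_count and batch size */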
+	if (misr_map->enable) {
+		if (misr_map->last_idx < misr_map->frame_count &&
+			misr_map->last_idx < SDE_CRC_BATCH_SIZE)
+			misr_map->crc_value[misr_map->last_idx] =
+				SDE_REG_READ(c, INTF_MISR_SIGNATURE);
+	}
+
+	misr_map->enable =
+		misr_map->enable & (misr_map->last_idx <= SDE_CRC_BATCH_SIZE);
+
+	misr_map->last_idx++;
+}
+
 static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
 		unsigned long cap)
 {
@@ -246,6 +299,8 @@
 	ops->setup_prg_fetch  = sde_hw_intf_setup_prg_fetch;
 	ops->get_status = sde_hw_intf_get_status;
 	ops->enable_timing = sde_hw_intf_enable_timing_engine;
+	ops->setup_misr = sde_hw_intf_set_misr;
+	ops->collect_misr = sde_hw_intf_collect_misr;
 }
 
 struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
index 133990d..f4a01cb 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -19,6 +19,24 @@
 
 struct sde_hw_intf;
 
+/* Batch size of frames for collecting MISR data */
+#define SDE_CRC_BATCH_SIZE 16
+
+/**
+ * struct sde_misr_params : Interface for getting and setting MISR data
+ *  Assumption is these functions will be called after clocks are enabled
+ * @ enable : enables/disables MISR
+ * @ frame_count : represents number of frames for which MISR is enabled
+ * @ last_idx: number of frames for which MISR data is collected
+ * @ crc_value: stores the collected MISR data
+ */
+struct sde_misr_params {
+	bool enable;
+	u32 frame_count;
+	u32 last_idx;
+	u32 crc_value[SDE_CRC_BATCH_SIZE];
+};
+
 /* intf timing settings */
 struct intf_timing_params {
 	u32 width;		/* active width */
@@ -58,6 +76,8 @@
  * @ setup_prog_fetch : enables/disables the programmable fetch logic
  * @ enable_timing: enable/disable timing engine
  * @ get_status: returns if timing engine is enabled or not
+ * @ setup_misr: enables/disables MISR in HW register
+ * @ collect_misr: reads and stores MISR data from HW register
  */
 struct sde_hw_intf_ops {
 	void (*setup_timing_gen)(struct sde_hw_intf *intf,
@@ -72,6 +92,12 @@
 
 	void (*get_status)(struct sde_hw_intf *intf,
 			struct intf_status *status);
+
+	void (*setup_misr)(struct sde_hw_intf *intf,
+			struct sde_misr_params *misr_map);
+
+	void (*collect_misr)(struct sde_hw_intf *intf,
+			struct sde_misr_params *misr_map);
 };
 
 struct sde_hw_intf {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index d912f31..20e42b0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -37,10 +37,15 @@
 #endif
 
 #define PIPES_PER_STAGE			2
+#ifndef SDE_MAX_DE_CURVES
+#define SDE_MAX_DE_CURVES		3
+#endif
 
 #define SDE_FORMAT_FLAG_YUV		(1 << 0)
+#define SDE_FORMAT_FLAG_DX		(1 << 1)
 
 #define SDE_FORMAT_IS_YUV(X)		((X)->flag & SDE_FORMAT_FLAG_YUV)
+#define SDE_FORMAT_IS_DX(X)		((X)->flag & SDE_FORMAT_FLAG_DX)
 #define SDE_FORMAT_IS_LINEAR(X)		((X)->fetch_mode == SDE_FETCH_LINEAR)
 #define SDE_FORMAT_IS_UBWC(X)		((X)->fetch_mode == SDE_FETCH_UBWC)
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index a478a7c..929c59a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -14,6 +14,7 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_lm.h"
 #include "sde_hw_sspp.h"
+#include "sde_hw_color_processing.h"
 
 #define SDE_FETCH_CONFIG_RESET_VALUE   0x00000087
 
@@ -64,6 +65,7 @@
 #define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
 #define SSPP_UBWC_ERROR_STATUS             0x138
 #define SSPP_VIG_OP_MODE                   0x0
+#define SSPP_VIG_CSC_10_OP_MODE            0x0
 
 /* SSPP_QOS_CTRL */
 #define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
@@ -85,6 +87,57 @@
 #define COMP1_2_INIT_PHASE_Y               0x2C
 #define VIG_0_QSEED2_SHARP                 0x30
 
+/* SDE_SSPP_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION                  0x00
+#define QSEED3_OP_MODE                     0x04
+#define QSEED3_RGB2Y_COEFF                 0x08
+#define QSEED3_PHASE_INIT                  0x0C
+#define QSEED3_PHASE_STEP_Y_H              0x10
+#define QSEED3_PHASE_STEP_Y_V              0x14
+#define QSEED3_PHASE_STEP_UV_H             0x18
+#define QSEED3_PHASE_STEP_UV_V             0x1C
+#define QSEED3_PRELOAD                     0x20
+#define QSEED3_DE_SHARPEN                  0x24
+#define QSEED3_DE_SHARPEN_CTL              0x28
+#define QSEED3_DE_SHAPE_CTL                0x2C
+#define QSEED3_DE_THRESHOLD                0x30
+#define QSEED3_DE_ADJUST_DATA_0            0x34
+#define QSEED3_DE_ADJUST_DATA_1            0x38
+#define QSEED3_DE_ADJUST_DATA_2            0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A            0x40
+#define QSEED3_SRC_SIZE_UV                 0x44
+#define QSEED3_DST_SIZE                    0x48
+#define QSEED3_COEF_LUT_CTRL               0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT           0
+#define QSEED3_COEF_LUT_DIR_BIT            1
+#define QSEED3_COEF_LUT_Y_CIR_BIT          2
+#define QSEED3_COEF_LUT_UV_CIR_BIT         3
+#define QSEED3_COEF_LUT_Y_SEP_BIT          4
+#define QSEED3_COEF_LUT_UV_SEP_BIT         5
+#define QSEED3_BUFFER_CTRL                 0x50
+#define QSEED3_CLK_CTRL0                   0x54
+#define QSEED3_CLK_CTRL1                   0x58
+#define QSEED3_CLK_STATUS                  0x5C
+#define QSEED3_MISR_CTRL                   0x70
+#define QSEED3_MISR_SIGNATURE_0            0x74
+#define QSEED3_MISR_SIGNATURE_1            0x78
+#define QSEED3_PHASE_INIT_Y_H              0x90
+#define QSEED3_PHASE_INIT_Y_V              0x94
+#define QSEED3_PHASE_INIT_UV_H             0x98
+#define QSEED3_PHASE_INIT_UV_V             0x9C
+#define QSEED3_COEF_LUT                    0x100
+#define QSEED3_FILTERS                     5
+#define QSEED3_LUT_REGIONS                 4
+#define QSEED3_CIRCULAR_LUTS               9
+#define QSEED3_SEPARABLE_LUTS              10
+#define QSEED3_LUT_SIZE                    60
+#define QSEED3_ENABLE                      2
+#define QSEED3_DIR_LUT_SIZE                (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+	(QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+	(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
 /*
  * Definitions for ViG op modes
  */
@@ -103,6 +156,13 @@
 #define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
 #define VIG_OP_MEM_PROT_BLEND  BIT(1)
 
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN          BIT(0)
+#define CSC_10BIT_OFFSET       4
+
 static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
 		int s_id,
 		u32 *idx)
@@ -123,16 +183,17 @@
 		*idx = sblk->scaler_blk.base;
 		break;
 	case SDE_SSPP_CSC:
+	case SDE_SSPP_CSC_10BIT:
 		*idx = sblk->csc_blk.base;
 		break;
 	case SDE_SSPP_HSIC:
-		*idx = sblk->hsic.base;
+		*idx = sblk->hsic_blk.base;
 		break;
 	case SDE_SSPP_PCC:
 		*idx = sblk->pcc_blk.base;
 		break;
 	case SDE_SSPP_MEMCOLOR:
-		*idx = sblk->memcolor.base;
+		*idx = sblk->memcolor_blk.base;
 		break;
 	default:
 		rc = -EINVAL;
@@ -147,18 +208,39 @@
 	u32 idx;
 	u32 opmode;
 
-	if (!_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) &&
-			test_bit(SDE_SSPP_CSC, &ctx->cap->features)) {
-		opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+	if (!test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+		_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) ||
+		!test_bit(SDE_SSPP_CSC, &ctx->cap->features))
+		return;
 
-		if (en)
-			opmode |= mask;
-		else
-			opmode &= ~mask;
+	opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
 
-		SDE_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
-	}
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
 }
+
+static void _sspp_setup_csc10_opmode(struct sde_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC_10BIT, &idx))
+		return;
+
+	opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
 /**
  * Setup source pixel format, flip,
  */
@@ -230,9 +312,17 @@
 	if (SDE_FORMAT_IS_YUV(fmt))
 		src_format |= BIT(15);
 
+	if (SDE_FORMAT_IS_DX(fmt))
+		src_format |= BIT(14);
+
 	/* update scaler opmode, if appropriate */
-	_sspp_setup_opmode(ctx,
-		VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT, SDE_FORMAT_IS_YUV(fmt));
+	if (test_bit(SDE_SSPP_CSC, &ctx->cap->features))
+		_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+			SDE_FORMAT_IS_YUV(fmt));
+	else if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features))
+		_sspp_setup_csc10_opmode(ctx,
+			VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+			SDE_FORMAT_IS_YUV(fmt));
 
 	SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format);
 	SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack);
@@ -302,13 +392,17 @@
 }
 
 static void _sde_hw_sspp_setup_scaler(struct sde_hw_pipe *ctx,
-		struct sde_hw_pixel_ext *pe)
+		struct sde_hw_pipe_cfg *sspp,
+		struct sde_hw_pixel_ext *pe,
+		void *scaler_cfg)
 {
 	struct sde_hw_blk_reg_map *c;
 	int config_h = 0x0;
 	int config_v = 0x0;
 	u32 idx;
 
+	(void)sspp;
+	(void)scaler_cfg;
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !pe)
 		return;
 
@@ -355,12 +449,249 @@
 		pe->phase_step_y[SDE_SSPP_COMP_1_2]);
 }
 
+static void _sde_hw_sspp_setup_scaler3_lut(struct sde_hw_pipe *ctx,
+		struct sde_hw_scaler3_cfg *scaler3_cfg)
+{
+	u32 idx;
+	int i, j, filter;
+	int config_lut = 0x0;
+	unsigned long lut_flags;
+	u32 lut_addr, lut_offset, lut_len;
+	u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
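+	/*
+	 * Each {length, offset} entry describes one LUT region per filter:
+	 * (length << 2) consecutive u32 values are written starting at
+	 * QSEED3_COEF_LUT + offset.
+	 */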
+	static const uint32_t offset[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+	};
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) ||
+		!scaler3_cfg)
+		return;
+
+	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+	if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+		(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+		lut[0] = scaler3_cfg->dir_lut;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[1] = scaler3_cfg->cir_lut +
+			scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[2] = scaler3_cfg->cir_lut +
+			scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[3] = scaler3_cfg->sep_lut +
+			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[4] = scaler3_cfg->sep_lut +
+			scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+
+	if (config_lut) {
+		for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+			if (!lut[filter])
+				continue;
+			lut_offset = 0;
+			for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+				lut_addr = QSEED3_COEF_LUT + idx
+					+ offset[filter][i][1];
+				lut_len = offset[filter][i][0] << 2;
+				for (j = 0; j < lut_len; j++) {
+					SDE_REG_WRITE(&ctx->hw,
+						lut_addr,
+						(lut[filter])[lut_offset++]);
+					lut_addr += 4;
+				}
+			}
+		}
+	}
+
+	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+		SDE_REG_WRITE(&ctx->hw, QSEED3_COEF_LUT_CTRL + idx, BIT(0));
+
+}
+
+static void _sde_hw_sspp_setup_scaler3_de(struct sde_hw_pipe *ctx,
+		struct sde_hw_scaler3_de_cfg *de_cfg)
+{
+	u32 idx;
+	u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+	u32 adjust_a, adjust_b, adjust_c;
+	struct sde_hw_blk_reg_map *hw;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !de_cfg)
+		return;
+
+	if (!de_cfg->enable)
+		return;
+
+	hw = &ctx->hw;
+	sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+		((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+	sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+		((de_cfg->prec_shift & 0x7) << 13) |
+		((de_cfg->clip & 0x7) << 16);
+
+	shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+		((de_cfg->thr_dieout & 0x3FF) << 16);
+
+	de_thr = (de_cfg->thr_low & 0x3FF) |
+		((de_cfg->thr_high & 0x3FF) << 16);
+
+	adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+		((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+	adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+		((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+	adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+		((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+	SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN + idx, sharp_lvl);
+	SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN_CTL + idx, sharp_ctl);
+	SDE_REG_WRITE(hw, QSEED3_DE_SHAPE_CTL + idx, shape_ctl);
+	SDE_REG_WRITE(hw, QSEED3_DE_THRESHOLD + idx, de_thr);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_0 + idx, adjust_a);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_1 + idx, adjust_b);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_2 + idx, adjust_c);
+
+}
+
+static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *sspp,
+		struct sde_hw_pixel_ext *pe,
+		void *scaler_cfg)
+{
+	u32 idx;
+	u32 op_mode = 0;
+	u32 phase_init, preload, src_y_rgb, src_uv, dst;
+	struct sde_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+	(void)pe;
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !sspp
+		|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+		return;
+
+	if (!scaler3_cfg->enable) {
+		SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, 0x0);
+		return;
+	}
+
+	op_mode |= BIT(0);
+	op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+	if (SDE_FORMAT_IS_YUV(sspp->layout.format)) {
+		op_mode |= BIT(12);
+		op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+	}
+
+	if (!SDE_FORMAT_IS_DX(sspp->layout.format))
+		op_mode |= BIT(14);
+
+	op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+	op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+	preload =
+		((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+		((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+		((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+		((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+	src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+		((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+	src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+		((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+	dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+		((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+	if (scaler3_cfg->de.enable) {
+		_sde_hw_sspp_setup_scaler3_de(ctx, &scaler3_cfg->de);
+		op_mode |= BIT(8);
+	}
+
+	if (scaler3_cfg->lut_flag)
+		_sde_hw_sspp_setup_scaler3_lut(ctx, scaler3_cfg);
+
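+	/*
+	 * Scaler version 0x1002 packs the initial phase values into a single
+	 * register, while later revisions use per-component phase init
+	 * registers and a wider alpha filter configuration field.
+	 */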
+	if (ctx->cap->sblk->scaler_blk.version == 0x1002) {
+		if (sspp->layout.format->alpha_enable) {
+			op_mode |= BIT(10);
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+		}
+		phase_init =
+			((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+			((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+			((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+			((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT + idx, phase_init);
+	} else {
+		if (sspp->layout.format->alpha_enable) {
+			op_mode |= BIT(10);
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+		}
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_H + idx,
+			scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_V + idx,
+			scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_H + idx,
+			scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_V + idx,
+			scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+	}
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_H + idx,
+		scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_V + idx,
+		scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_H + idx,
+		scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_V + idx,
+		scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PRELOAD + idx, preload);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_Y_RGB_A + idx, src_y_rgb);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_UV + idx, src_uv);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_DST_SIZE + idx, dst);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, op_mode);
+}
+
 /**
  * sde_hw_sspp_setup_rects()
  */
 static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
 		struct sde_hw_pipe_cfg *cfg,
-		struct sde_hw_pixel_ext *pe_ext)
+		struct sde_hw_pixel_ext *pe_ext,
+		void *scale_cfg)
 {
 	struct sde_hw_blk_reg_map *c;
 	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
@@ -392,8 +723,7 @@
 		/* program decimation */
 		decimation = ((1 << cfg->horz_decimation) - 1) << 8;
 		decimation |= ((1 << cfg->vert_decimation) - 1);
-
-		_sde_hw_sspp_setup_scaler(ctx, pe_ext);
+		ctx->ops.setup_scaler(ctx, cfg, pe_ext, scale_cfg);
 	}
 
 	/* rectangle register programming */
@@ -429,6 +759,9 @@
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC, &idx) || !data)
 		return;
 
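+	/*
+	 * For 10-bit CSC the coefficient registers start CSC_10BIT_OFFSET
+	 * bytes past the block base, after the opmode register.
+	 */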
+	if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features))
+		idx += CSC_10BIT_OFFSET;
+
 	sde_hw_csc_setup(&ctx->hw, idx, data);
 }
 
@@ -508,25 +841,51 @@
 	SDE_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
 }
 
-static void _setup_layer_ops(struct sde_hw_sspp_ops *ops,
+static void _setup_layer_ops(struct sde_hw_pipe *c,
 		unsigned long features)
 {
 	if (test_bit(SDE_SSPP_SRC, &features)) {
-		ops->setup_format = sde_hw_sspp_setup_format;
-		ops->setup_rects = sde_hw_sspp_setup_rects;
-		ops->setup_sourceaddress = sde_hw_sspp_setup_sourceaddress;
-		ops->setup_solidfill = sde_hw_sspp_setup_solidfill;
+		c->ops.setup_format = sde_hw_sspp_setup_format;
+		c->ops.setup_rects = sde_hw_sspp_setup_rects;
+		c->ops.setup_sourceaddress = sde_hw_sspp_setup_sourceaddress;
+		c->ops.setup_solidfill = sde_hw_sspp_setup_solidfill;
 	}
 	if (test_bit(SDE_SSPP_QOS, &features)) {
-		ops->setup_danger_safe_lut = sde_hw_sspp_setup_danger_safe_lut;
-		ops->setup_creq_lut = sde_hw_sspp_setup_creq_lut;
-		ops->setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
+		c->ops.setup_danger_safe_lut =
+			sde_hw_sspp_setup_danger_safe_lut;
+		c->ops.setup_creq_lut = sde_hw_sspp_setup_creq_lut;
+		c->ops.setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
 	}
-	if (test_bit(SDE_SSPP_CSC, &features))
-		ops->setup_csc = sde_hw_sspp_setup_csc;
+
+	if (test_bit(SDE_SSPP_CSC, &features) ||
+		test_bit(SDE_SSPP_CSC_10BIT, &features))
+		c->ops.setup_csc = sde_hw_sspp_setup_csc;
 
 	if (test_bit(SDE_SSPP_SCALER_QSEED2, &features))
-		ops->setup_sharpening = sde_hw_sspp_setup_sharpening;
+		c->ops.setup_sharpening = sde_hw_sspp_setup_sharpening;
+
+	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features))
+		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler3;
+	else
+		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler;
+
+	if (test_bit(SDE_SSPP_HSIC, &features)) {
+		/* TODO: add version based assignment here as inline or macro */
+		if (c->cap->sblk->hsic_blk.version ==
+			(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+			c->ops.setup_pa_hue = sde_setup_pipe_pa_hue_v1_7;
+			c->ops.setup_pa_sat = sde_setup_pipe_pa_sat_v1_7;
+			c->ops.setup_pa_val = sde_setup_pipe_pa_val_v1_7;
+			c->ops.setup_pa_cont = sde_setup_pipe_pa_cont_v1_7;
+		}
+	}
+
+	if (test_bit(SDE_SSPP_MEMCOLOR, &features)) {
+		if (c->cap->sblk->memcolor_blk.version ==
+			(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+			c->ops.setup_pa_memcolor =
+				sde_setup_pipe_pa_memcol_v1_7;
+	}
 }
 
 static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
@@ -571,7 +930,7 @@
 	/* Assign ops */
 	ctx->idx = idx;
 	ctx->cap = cfg;
-	_setup_layer_ops(&ctx->ops, ctx->cap->features);
+	_setup_layer_ops(ctx, ctx->cap->features);
 	ctx->highest_bank_bit = catalog->mdp[0].highest_bank_bit;
 
 	return ctx;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 43070fa..743f5e7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -17,6 +17,7 @@
 #include "sde_hw_mdss.h"
 #include "sde_hw_util.h"
 #include "sde_formats.h"
+#include "sde_color_processing.h"
 
 struct sde_hw_pipe;
 
@@ -119,8 +120,112 @@
 
 };
 
+/**
+ * struct sde_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable:         detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @ clip:          clip shift
+ * @ limit:         limit value
+ * @ thr_quiet:     quiet threshold
+ * @ thr_dieout:    dieout threshold
+ * @ thr_low:       low threshold
+ * @ thr_high:      high threshold
+ * @ prec_shift:    precision shift
+ * @ adjust_a:      A-coefficients for mapping curve
+ * @ adjust_b:      B-coefficients for mapping curve
+ * @ adjust_c:      C-coefficients for mapping curve
+ */
+struct sde_hw_scaler3_de_cfg {
+	u32 enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[SDE_MAX_DE_CURVES];
+	int16_t adjust_b[SDE_MAX_DE_CURVES];
+	int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/**
+ * struct sde_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable:        scaler enable
+ * @dir_en:        direction detection block enable
+ * @ init_phase_x: horizontal initial phase
+ * @ phase_step_x: horizontal phase step
+ * @ init_phase_y: vertical initial phase
+ * @ phase_step_y: vertical phase step
+ * @ preload_x:    horizontal preload value
+ * @ preload_y:    vertical preload value
+ * @ src_width:    source width
+ * @ src_height:   source height
+ * @ dst_width:    destination width
+ * @ dst_height:   destination height
+ * @ y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @ uv_filter_cfg: uv plane filter configuration
+ * @ alpha_filter_cfg: alpha filter configuration
+ * @ blend_cfg:    blend coefficients configuration
+ * @ lut_flag:     scaler LUT update flags
+ *                 0x1 swap LUT bank
+ *                 0x2 update 2D filter LUT
+ *                 0x4 update y circular filter LUT
+ *                 0x8 update uv circular filter LUT
+ *                 0x10 update y separable filter LUT
+ *                 0x20 update uv separable filter LUT
+ * @ dir_lut_idx:  2D filter LUT index
+ * @ y_rgb_cir_lut_idx: y circular filter LUT index
+ * @ uv_cir_lut_idx: uv circular filter LUT index
+ * @ y_rgb_sep_lut_idx: y separable filter LUT index
+ * @ uv_sep_lut_idx: uv separable filter LUT index
+ * @ dir_lut:      pointer to 2D LUT
+ * @ cir_lut:      pointer to circular filter LUT
+ * @ sep_lut:      pointer to separable filter LUT
+ * @ de: detail enhancer configuration
+ */
 struct sde_hw_scaler3_cfg {
-	uint32_t filter_mode;
+	u32 enable;
+	u32 dir_en;
+	int32_t init_phase_x[SDE_MAX_PLANES];
+	int32_t phase_step_x[SDE_MAX_PLANES];
+	int32_t init_phase_y[SDE_MAX_PLANES];
+	int32_t phase_step_y[SDE_MAX_PLANES];
+
+	u32 preload_x[SDE_MAX_PLANES];
+	u32 preload_y[SDE_MAX_PLANES];
+	u32 src_width[SDE_MAX_PLANES];
+	u32 src_height[SDE_MAX_PLANES];
+
+	u32 dst_width;
+	u32 dst_height;
+
+	u32 y_rgb_filter_cfg;
+	u32 uv_filter_cfg;
+	u32 alpha_filter_cfg;
+	u32 blend_cfg;
+
+	u32 lut_flag;
+	u32 dir_lut_idx;
+
+	u32 y_rgb_cir_lut_idx;
+	u32 uv_cir_lut_idx;
+	u32 y_rgb_sep_lut_idx;
+	u32 uv_sep_lut_idx;
+	u32 *dir_lut;
+	size_t dir_len;
+	u32 *cir_lut;
+	size_t cir_len;
+	u32 *sep_lut;
+	size_t sep_len;
+
+	/*
+	 * Detail enhancer settings
+	 */
+	struct sde_hw_scaler3_de_cfg de;
 };
 
 /**
@@ -184,10 +289,12 @@
 	 * @ctx: Pointer to pipe context
 	 * @cfg: Pointer to pipe config structure
 	 * @pe_ext: Pointer to pixel ext settings
+	 * @scale_cfg: Pointer to scaler settings
 	 */
 	void (*setup_rects)(struct sde_hw_pipe *ctx,
 			struct sde_hw_pipe_cfg *cfg,
-			struct sde_hw_pixel_ext *pe_ext);
+			struct sde_hw_pixel_ext *pe_ext,
+			void *scale_cfg);
 
 	/**
 	 * setup_sourceaddress - setup pipe source addresses
@@ -220,14 +327,43 @@
 	void (*setup_sharpening)(struct sde_hw_pipe *ctx,
 			struct sde_hw_sharp_cfg *cfg);
 
+
+	/**
+	 * setup_pa_hue(): Setup source hue adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to hue data
+	 */
+	void (*setup_pa_hue)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_sat(): Setup source saturation adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to saturation data
+	 */
+	void (*setup_pa_sat)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_val(): Setup source value adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to value data
+	 */
+	void (*setup_pa_val)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_cont(): Setup source contrast adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer contrast data
+	 */
+	void (*setup_pa_cont)(struct sde_hw_pipe *ctx, void *cfg);
+
 	/**
 	 * setup_pa_memcolor - setup source color processing
 	 * @ctx: Pointer to pipe context
-	 * @memcolortype: Memcolor type
-	 * @en: PA enable
+	 * @type: Memcolor type (Skin, sky or foliage)
+	 * @cfg: Pointer to memory color config data
 	 */
 	void (*setup_pa_memcolor)(struct sde_hw_pipe *ctx,
-			u32 memcolortype, u8 en);
+			enum sde_memcolor_type type, void *cfg);
 
 	/**
 	 * setup_igc - setup inverse gamma correction
@@ -269,6 +405,18 @@
 	 */
 	void (*setup_histogram)(struct sde_hw_pipe *ctx,
 			void *cfg);
+
+	/**
+	 * setup_scaler - setup scaler
+	 * @ctx: Pointer to pipe context
+	 * @pipe_cfg: Pointer to pipe configuration
+	 * @pe_cfg: Pointer to pixel extension configuration
+	 * @scaler_cfg: Pointer to scaler configuration
+	 */
+	void (*setup_scaler)(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *pipe_cfg,
+		struct sde_hw_pixel_ext *pe_cfg,
+		void *scaler_cfg);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index 10d3917..1a5d469 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -22,6 +22,9 @@
 #define FLD_INTF_2_SW_TRG_MUX             BIT(8)
 #define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
 
+#define DANGER_STATUS                     0x360
+#define SAFE_STATUS                       0x364
+
 #define TE_LINE_INTERVAL                  0x3F4
 
 #define TRAFFIC_SHAPER_EN                 BIT(31)
@@ -29,13 +32,16 @@
 #define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
 #define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
 
-static void sde_hw_setup_split_pipe_control(struct sde_hw_mdp *mdp,
+static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
 		struct split_pipe_cfg *cfg)
 {
 	struct sde_hw_blk_reg_map *c = &mdp->hw;
 	u32 upper_pipe = 0;
 	u32 lower_pipe = 0;
 
+	if (!mdp || !cfg)
+		return;
+
 	if (cfg->en) {
 		if (cfg->mode == INTF_MODE_CMD) {
 			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
@@ -46,7 +52,7 @@
 				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
 
 			/* free run */
-			if (cfg->pp_split)
+			if (cfg->pp_split_slave != INTF_MAX)
 				lower_pipe = FLD_SMART_PANEL_FREE_RUN;
 
 			upper_pipe = lower_pipe;
@@ -61,12 +67,39 @@
 		}
 	}
 
-	SDE_REG_WRITE(c, SSPP_SPARE, (cfg->split_flush_en) ? 0x1 : 0x0);
+	SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
 	SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
 	SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
 	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
 }
 
+static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
+		struct split_pipe_cfg *cfg)
+{
+	u32 ppb_config = 0x0;
+	u32 ppb_control = 0x0;
+
+	if (!mdp || !cfg)
+		return;
+
+	if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
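+		/* 1-based slave interface index goes into the PPB config */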
+		ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
+		ppb_config |= BIT(16); /* split enable */
+		ppb_control = BIT(5); /* horz split*/
+	}
+	if (cfg->pp_split_index) {
+		SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
+	} else {
+		SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
+		SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
+	}
+}
+
 static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
 		struct cdm_output_cfg *cfg)
 {
@@ -109,12 +142,72 @@
 	return clk_forced_on;
 }
 
+
+static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
+		struct sde_danger_safe_status *status)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 value;
+
+	value = SDE_REG_READ(c, DANGER_STATUS);
+	status->mdp = (value >> 0) & 0x3;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+	status->wb[WB_0] = 0;
+	status->wb[WB_1] = 0;
+	status->wb[WB_2] = (value >> 2) & 0x3;
+	status->wb[WB_3] = 0;
+}
+
+static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
+		struct sde_danger_safe_status *status)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 value;
+
+	value = SDE_REG_READ(c, SAFE_STATUS);
+	status->mdp = (value >> 0) & 0x1;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+	status->wb[WB_0] = 0;
+	status->wb[WB_1] = 0;
+	status->wb[WB_2] = (value >> 2) & 0x1;
+	status->wb[WB_3] = 0;
+}
+
 static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
 		unsigned long cap)
 {
-	ops->setup_split_pipe = sde_hw_setup_split_pipe_control;
+	ops->setup_split_pipe = sde_hw_setup_split_pipe;
+	ops->setup_pp_split = sde_hw_setup_pp_split;
 	ops->setup_cdm_output = sde_hw_setup_cdm_output;
 	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
+	ops->get_danger_status = sde_hw_get_danger_status;
+	ops->get_safe_status = sde_hw_get_safe_status;
 }
 
 static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 7359a77..780d051 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -40,7 +40,8 @@
 * @en        : Enable/disable dual pipe configuration
  * @mode      : Panel interface mode
  * @intf      : Interface id for main control path
- * @pp_split  : Ping pong split is enabled or disabled
+ * @pp_split_slave: Slave interface for ping pong split, INTF_MAX to disable
+ * @pp_split_index: Ping pong index for ping pong split
  * @split_flush_en: Allows both the paths to be flushed when master path is
  *              flushed
  */
@@ -48,7 +49,8 @@
 	bool en;
 	enum sde_intf_mode mode;
 	enum sde_intf intf;
-	bool pp_split;
+	enum sde_intf pp_split_slave;
+	u32 pp_split_index;
 	bool split_flush_en;
 };
 
@@ -63,9 +65,22 @@
 };
 
 /**
+ * struct sde_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ * @wb: writeback output status
+ */
+struct sde_danger_safe_status {
+	u8 mdp;
+	u8 sspp[SSPP_MAX];
+	u8 wb[WB_MAX];
+};
+
+/**
  * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
  * Assumption is these functions will be called after clocks are enabled.
  * @setup_split_pipe : Programs the pipe control registers
+ * @setup_pp_split : Programs the pp split control registers
  * @setup_cdm_output : programs cdm control
  * @setup_traffic_shaper : programs traffic shaper control
  */
@@ -78,6 +93,13 @@
 	void (*setup_split_pipe)(struct sde_hw_mdp *mdp,
 			struct split_pipe_cfg *p);
 
+	/** setup_pp_split() : Configure pp split related registers
+	 * @mdp  : mdp top context driver
+	 * @cfg  : upper and lower part of pipe configuration
+	 */
+	void (*setup_pp_split)(struct sde_hw_mdp *mdp,
+			struct split_pipe_cfg *cfg);
+
 	/**
 	 * setup_cdm_output() : Setup selection control of the cdm data path
 	 * @mdp  : mdp top context driver
@@ -103,6 +125,22 @@
 	 */
 	bool (*setup_clk_force_ctrl)(struct sde_hw_mdp *mdp,
 			enum sde_clk_ctrl_type clk_ctrl, bool enable);
+
+	/**
+	 * get_danger_status - get danger status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_danger_status)(struct sde_hw_mdp *mdp,
+			struct sde_danger_safe_status *status);
+
+	/**
+	 * get_safe_status - get safe status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_safe_status)(struct sde_hw_mdp *mdp,
+			struct sde_danger_safe_status *status);
 };
 
 struct sde_hw_mdp {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
index c68ee23..426e999 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -107,6 +107,9 @@
 			(ctx->caps->features & BIT(SDE_WB_YUV_CONFIG)))
 		dst_format |= BIT(15);
 
+	if (SDE_FORMAT_IS_DX(fmt))
+		dst_format |= BIT(21);
+
 	pattern = (fmt->element[3] << 24) |
 			(fmt->element[2] << 16) |
 			(fmt->element[1] << 8)  |
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 650ef84..716759c 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -17,13 +17,19 @@
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
+
+#include "dsi_display.h"
+#include "dsi_drm.h"
+#include "sde_wb.h"
+
 #include "sde_kms.h"
 #include "sde_core_irq.h"
 #include "sde_formats.h"
-#include "sde_hw_mdss.h"
-#include "sde_hw_util.h"
-#include "sde_hw_intf.h"
 #include "sde_hw_vbif.h"
+#include "sde_vbif.h"
+#include "sde_encoder.h"
+#include "sde_plane.h"
+#include "sde_crtc.h"
 
 #define CREATE_TRACE_POINTS
 #include "sde_trace.h"
@@ -64,12 +70,110 @@
 module_param(sdecustom, bool, 0400);
 MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
 
+static int sde_kms_hw_init(struct msm_kms *kms);
+static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
+
 bool sde_is_custom_client(void)
 {
 	return sdecustom;
 }
 
-static int sde_debugfs_show_regset32(struct seq_file *s, void *data)
+#ifdef CONFIG_DEBUG_FS
+static int _sde_danger_signal_status(struct seq_file *s,
+		bool danger_status)
+{
+	struct sde_kms *kms = (struct sde_kms *)s->private;
+	struct msm_drm_private *priv;
+	struct sde_danger_safe_status status;
+	int i;
+
+	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+		SDE_ERROR("invalid arg(s)\n");
+		return 0;
+	}
+
+	priv = kms->dev->dev_private;
+	memset(&status, 0, sizeof(struct sde_danger_safe_status));
+
+	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		if (kms->hw_mdp->ops.get_danger_status)
+			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+					&status);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		if (kms->hw_mdp->ops.get_safe_status)
+			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+					&status);
+	}
+	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
+
+	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
+
+	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
+				status.sspp[i]);
+	seq_puts(s, "\n");
+
+	for (i = WB_0; i < WB_MAX; i++)
+		seq_printf(s, "WB%d     :  0x%x  \t", i - WB_0,
+				status.wb[i]);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
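+/*
+ * Generate the single_open() wrapper and file_operations for a debugfs
+ * seq_file show function named <__prefix>_show.
+ */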
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return _sde_danger_signal_status(s, true);
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
+
+static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return _sde_danger_signal_status(s, false);
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
+
+static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove_recursive(sde_kms->debugfs_danger);
+	sde_kms->debugfs_danger = NULL;
+}
+
+static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	sde_kms->debugfs_danger = debugfs_create_dir("danger",
+			parent);
+	if (!sde_kms->debugfs_danger) {
+		SDE_ERROR("failed to create danger debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_file("danger_status", 0644, sde_kms->debugfs_danger,
+			sde_kms, &sde_debugfs_danger_stats_fops);
+	debugfs_create_file("safe_status", 0644, sde_kms->debugfs_danger,
+			sde_kms, &sde_debugfs_safe_stats_fops);
+
+	return 0;
+}
+
+static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
 {
 	struct sde_debugfs_regset32 *regset;
 	struct sde_kms *sde_kms;
@@ -123,9 +227,10 @@
 	return 0;
 }
 
-static int sde_debugfs_open_regset32(struct inode *inode, struct file *file)
+static int sde_debugfs_open_regset32(struct inode *inode,
+		struct file *file)
 {
-	return single_open(file, sde_debugfs_show_regset32, inode->i_private);
+	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
 }
 
 static const struct file_operations sde_fops_regset32 = {
@@ -163,7 +268,7 @@
 	return sde_kms ? sde_kms->debugfs_root : 0;
 }
 
-static int sde_debugfs_init(struct sde_kms *sde_kms)
+static int _sde_debugfs_init(struct sde_kms *sde_kms)
 {
 	void *p;
 
@@ -180,17 +285,43 @@
 	/* allow debugfs_root to be NULL */
 	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME,
 			0644, sde_kms->debugfs_root, p);
+
+	/* create common folder for debug information */
+	sde_kms->debugfs_debug = debugfs_create_dir("debug",
+			sde_kms->debugfs_root);
+	if (!sde_kms->debugfs_debug)
+		SDE_ERROR("failed to create debugfs debug directory\n");
+
+	sde_debugfs_danger_init(sde_kms, sde_kms->debugfs_debug);
+	sde_debugfs_vbif_init(sde_kms, sde_kms->debugfs_debug);
+
 	return 0;
 }
 
-static void sde_debugfs_destroy(struct sde_kms *sde_kms)
+static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
 {
 	/* don't need to NULL check debugfs_root */
 	if (sde_kms) {
+		sde_debugfs_vbif_destroy(sde_kms);
+		sde_debugfs_danger_destroy(sde_kms);
+		debugfs_remove_recursive(sde_kms->debugfs_debug);
+		sde_kms->debugfs_debug = 0;
 		debugfs_remove_recursive(sde_kms->debugfs_root);
 		sde_kms->debugfs_root = 0;
 	}
 }
+#else
+static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
+{
+}
+
+static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+#endif
 
 static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
@@ -214,7 +345,7 @@
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 }
 
-static void sde_prepare_commit(struct msm_kms *kms,
+static void sde_kms_prepare_commit(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
@@ -224,42 +355,39 @@
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
 }
 
-static void sde_commit(struct msm_kms *kms, struct drm_atomic_state *old_state)
+static void sde_kms_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
 {
-	struct sde_kms *sde_kms = to_sde_kms(kms);
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
 	int i;
 
-	MSM_EVT(sde_kms->dev, 0, 0);
-
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
-		if (crtc->state->active)
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		if (crtc->state->active) {
+			SDE_EVT32(DRMID(crtc));
 			sde_crtc_commit_kickoff(crtc);
+		}
+	}
 }
 
-static void sde_complete_commit(struct msm_kms *kms,
-		struct drm_atomic_state *state)
+static void sde_kms_complete_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
 	struct drm_device *dev = sde_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_connector *connector;
-	struct drm_connector_state *conn_state;
+	struct drm_crtc_state *old_crtc_state;
 	int i;
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i)
-		sde_crtc_complete_commit(crtc);
-	for_each_connector_in_state(state, connector, conn_state, i)
-		sde_connector_complete_commit(connector);
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_complete_commit(crtc, old_crtc_state);
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
-	MSM_EVT(sde_kms->dev, 0, 0);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 }
 
-static void sde_wait_for_commit_done(struct msm_kms *kms,
+static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
 		struct drm_crtc *crtc)
 {
 	struct drm_encoder *encoder;
@@ -281,10 +409,6 @@
 		return;
 	}
 
-	 /* ref count the vblank event and interrupts while we wait for it */
-	if (sde_crtc_vblank(crtc, true))
-		return;
-
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc != crtc)
 			continue;
@@ -293,34 +417,289 @@
 		 * For example, wait for vsync in case of video mode panels
 		 * This should be a no-op for command mode panels
 		 */
-		MSM_EVT(crtc->dev, crtc->base.id, 0);
+		SDE_EVT32(DRMID(crtc));
 		ret = sde_encoder_wait_for_commit_done(encoder);
 		if (ret && ret != -EWOULDBLOCK) {
-			DRM_ERROR("wait for commit done returned %d\n", ret);
+			SDE_ERROR("wait for commit done returned %d\n", ret);
 			break;
 		}
 	}
-
-	 /* release vblank event ref count */
-	sde_crtc_vblank(crtc, false);
 }
 
 static void sde_kms_prepare_fence(struct msm_kms *kms,
-		struct drm_atomic_state *state)
+		struct drm_atomic_state *old_state)
 {
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_connector *connector;
-	struct drm_connector_state *conn_state;
-	int i;
+	struct drm_crtc_state *old_crtc_state;
+	int i, rc;
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i)
-		sde_crtc_prepare_fence(crtc);
-	for_each_connector_in_state(state, connector, conn_state, i)
-		sde_connector_prepare_fence(connector);
+	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
+		SDE_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+retry:
+	/* attempt to acquire ww mutex for connection */
+	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
+			       old_state->acquire_ctx);
+
+	if (rc == -EDEADLK) {
+		drm_modeset_backoff(old_state->acquire_ctx);
+		goto retry;
+	}
+
+	/* old_state actually contains updated crtc pointers */
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_prepare_commit(crtc, old_crtc_state);
 }
 
-static int modeset_init(struct sde_kms *sde_kms)
+/**
+ * _sde_kms_get_displays - query for underlying display handles and cache them
+ * @sde_kms:    Pointer to sde kms structure
+ * Returns:     Zero on success
+ */
+static int _sde_kms_get_displays(struct sde_kms *sde_kms)
+{
+	int rc = -ENOMEM;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde kms\n");
+		return -EINVAL;
+	}
+
+	/* dsi */
+	sde_kms->dsi_displays = NULL;
+	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
+	if (sde_kms->dsi_display_count) {
+		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
+				sizeof(void *),
+				GFP_KERNEL);
+		if (!sde_kms->dsi_displays) {
+			SDE_ERROR("failed to allocate dsi displays\n");
+			goto exit_deinit_dsi;
+		}
+		sde_kms->dsi_display_count =
+			dsi_display_get_active_displays(sde_kms->dsi_displays,
+					sde_kms->dsi_display_count);
+	}
+
+	/* wb */
+	sde_kms->wb_displays = NULL;
+	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
+	if (sde_kms->wb_display_count) {
+		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
+				sizeof(void *),
+				GFP_KERNEL);
+		if (!sde_kms->wb_displays) {
+			SDE_ERROR("failed to allocate wb displays\n");
+			goto exit_deinit_wb;
+		}
+		sde_kms->wb_display_count =
+			wb_display_get_displays(sde_kms->wb_displays,
+					sde_kms->wb_display_count);
+	}
+	return 0;
+
+exit_deinit_wb:
+	kfree(sde_kms->wb_displays);
+	sde_kms->wb_display_count = 0;
+	sde_kms->wb_displays = NULL;
+
+exit_deinit_dsi:
+	kfree(sde_kms->dsi_displays);
+	sde_kms->dsi_display_count = 0;
+	sde_kms->dsi_displays = NULL;
+	return rc;
+}
+
+/**
+ * _sde_kms_release_displays - release cache of underlying display handles
+ * @sde_kms:    Pointer to sde kms structure
+ */
+static void _sde_kms_release_displays(struct sde_kms *sde_kms)
+{
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde kms\n");
+		return;
+	}
+
+	kfree(sde_kms->wb_displays);
+	sde_kms->wb_displays = NULL;
+	sde_kms->wb_display_count = 0;
+
+	kfree(sde_kms->dsi_displays);
+	sde_kms->dsi_displays = NULL;
+	sde_kms->dsi_display_count = 0;
+}
+
+/**
+ * _sde_kms_setup_displays - create encoders, bridges and connectors
+ *                           for underlying displays
+ * @dev:        Pointer to drm device structure
+ * @priv:       Pointer to private drm device data
+ * @sde_kms:    Pointer to sde kms structure
+ * Returns:     Zero on success
+ */
+static int _sde_kms_setup_displays(struct drm_device *dev,
+		struct msm_drm_private *priv,
+		struct sde_kms *sde_kms)
+{
+	static const struct sde_connector_ops dsi_ops = {
+		.post_init =  dsi_conn_post_init,
+		.detect =     dsi_conn_detect,
+		.get_modes =  dsi_connector_get_modes,
+		.mode_valid = dsi_conn_mode_valid,
+		.get_info =   dsi_display_get_info,
+		.set_backlight = dsi_display_set_backlight
+	};
+	static const struct sde_connector_ops wb_ops = {
+		.post_init =    sde_wb_connector_post_init,
+		.detect =       sde_wb_connector_detect,
+		.get_modes =    sde_wb_connector_get_modes,
+		.set_property = sde_wb_connector_set_property,
+		.get_info =     sde_wb_get_info,
+	};
+	struct msm_display_info info;
+	struct drm_encoder *encoder;
+	void *display, *connector;
+	int i, max_encoders;
+	int rc = 0;
+
+	if (!dev || !priv || !sde_kms) {
+		SDE_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
+		max_encoders = ARRAY_SIZE(priv->encoders);
+		SDE_ERROR("capping number of displays to %d\n", max_encoders);
+	}
+
+	/* dsi */
+	for (i = 0; i < sde_kms->dsi_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->dsi_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = dsi_display_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("dsi get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for dsi %d\n", i);
+			continue;
+		}
+
+		rc = dsi_display_drm_bridge_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					0,
+					display,
+					&dsi_ops,
+					DRM_CONNECTOR_POLL_HPD,
+					DRM_MODE_CONNECTOR_DSI);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("dsi %d connector init failed\n", i);
+			dsi_display_drm_bridge_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	/* wb */
+	for (i = 0; i < sde_kms->wb_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->wb_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = sde_wb_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("wb get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for wb %d\n", i);
+			continue;
+		}
+
+		rc = sde_wb_drm_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+				encoder,
+				0,
+				display,
+				&wb_ops,
+				DRM_CONNECTOR_POLL_HPD,
+				DRM_MODE_CONNECTOR_VIRTUAL);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("wb %d connector init failed\n", i);
+			sde_wb_drm_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	return 0;
+}
+
+static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	} else if (!sde_kms->dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	} else if (!sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid dev_private\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+	priv->num_crtcs = 0;
+
+	for (i = 0; i < priv->num_planes; i++)
+		priv->planes[i]->funcs->destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < priv->num_connectors; i++)
+		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+	priv->num_connectors = 0;
+
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+	priv->num_encoders = 0;
+
+	_sde_kms_release_displays(sde_kms);
+}
+
+static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
 {
 	struct drm_device *dev;
 	struct drm_plane *primary_planes[MAX_PLANES], *plane;
@@ -341,8 +720,12 @@
 	priv = dev->dev_private;
 	catalog = sde_kms->catalog;
 
-	/* Enumerate displays supported */
-	sde_encoders_init(dev);
+	/*
+	 * Query for underlying display drivers, and create connectors,
+	 * bridges and encoders for them.
+	 */
+	if (!_sde_kms_get_displays(sde_kms))
+		(void)_sde_kms_setup_displays(dev, priv, sde_kms);
 
 	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
 	max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
@@ -394,14 +777,10 @@
 
 	return 0;
 fail:
+	_sde_kms_drm_obj_destroy(sde_kms);
 	return ret;
 }
 
-static int sde_hw_init(struct msm_kms *kms)
-{
-	return 0;
-}
-
 static int sde_kms_postinit(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
@@ -422,17 +801,40 @@
 	return 0;
 }
 
-static long sde_round_pixclk(struct msm_kms *kms, unsigned long rate,
+static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
 		struct drm_encoder *encoder)
 {
 	return rate;
 }
 
-static void sde_destroy(struct msm_kms *kms)
+static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
+		struct platform_device *pdev)
 {
-	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
 	int i;
 
+	if (!sde_kms || !pdev)
+		return;
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return;
+
+	if (sde_kms->hw_intr)
+		sde_hw_intr_destroy(sde_kms->hw_intr);
+	sde_kms->hw_intr = NULL;
+
+	_sde_kms_release_displays(sde_kms);
+
+	/* safe to call these more than once during shutdown */
+	_sde_debugfs_destroy(sde_kms);
+	_sde_kms_mmu_destroy(sde_kms);
+
 	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
 		u32 vbif_idx = sde_kms->catalog->vbif[i].id;
 
@@ -440,9 +842,49 @@
 			sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
 	}
 
-	sde_debugfs_destroy(sde_kms);
-	sde_hw_intr_destroy(sde_kms->hw_intr);
-	sde_rm_destroy(&sde_kms->rm);
+	if (sde_kms->rm_init)
+		sde_rm_destroy(&sde_kms->rm);
+	sde_kms->rm_init = false;
+
+	if (sde_kms->catalog)
+		sde_hw_catalog_deinit(sde_kms->catalog);
+	sde_kms->catalog = NULL;
+
+	if (sde_kms->core_client)
+		sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
+	sde_kms->core_client = NULL;
+
+	if (sde_kms->vbif[VBIF_NRT])
+		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
+	sde_kms->vbif[VBIF_NRT] = NULL;
+
+	if (sde_kms->vbif[VBIF_RT])
+		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
+	sde_kms->vbif[VBIF_RT] = NULL;
+
+	if (sde_kms->mmio)
+		msm_iounmap(pdev, sde_kms->mmio);
+	sde_kms->mmio = NULL;
+}
+
+static void sde_kms_destroy(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev) {
+		SDE_ERROR("invalid device\n");
+		return;
+	}
+
+	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
 	kfree(sde_kms);
 }
 
@@ -458,7 +900,7 @@
 }
 
 static const struct msm_kms_funcs kms_funcs = {
-	.hw_init         = sde_hw_init,
+	.hw_init         = sde_kms_hw_init,
 	.postinit        = sde_kms_postinit,
 	.irq_preinstall  = sde_irq_preinstall,
 	.irq_postinstall = sde_irq_postinstall,
@@ -466,25 +908,46 @@
 	.irq             = sde_irq,
 	.preclose        = sde_kms_preclose,
 	.prepare_fence   = sde_kms_prepare_fence,
-	.prepare_commit  = sde_prepare_commit,
-	.commit          = sde_commit,
-	.complete_commit = sde_complete_commit,
-	.wait_for_crtc_commit_done = sde_wait_for_commit_done,
+	.prepare_commit  = sde_kms_prepare_commit,
+	.commit          = sde_kms_commit,
+	.complete_commit = sde_kms_complete_commit,
+	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
 	.enable_vblank   = sde_kms_enable_vblank,
 	.disable_vblank  = sde_kms_disable_vblank,
 	.check_modified_format = sde_format_check_modified_format,
 	.get_format      = sde_get_msm_format,
-	.round_pixclk    = sde_round_pixclk,
-	.destroy         = sde_destroy,
+	.round_pixclk    = sde_kms_round_pixclk,
+	.destroy         = sde_kms_destroy,
 };
 
 /* the caller api needs to turn on clock before calling it */
-static void core_hw_rev_init(struct sde_kms *sde_kms)
+static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
 {
 	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
 }
 
-int sde_mmu_init(struct sde_kms *sde_kms)
+static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
+{
+	struct msm_mmu *mmu;
+	int i;
+
+	for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
+		if (!sde_kms->mmu[i])
+			continue;
+
+		mmu = sde_kms->mmu[i];
+		msm_unregister_mmu(sde_kms->dev, mmu);
+		mmu->funcs->detach(mmu, (const char **)iommu_ports,
+				ARRAY_SIZE(iommu_ports));
+		mmu->funcs->destroy(mmu);
+		sde_kms->mmu[i] = 0;
+		sde_kms->mmu_id[i] = 0;
+	}
+
+	return 0;
+}
+
+static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
 {
 	struct msm_mmu *mmu;
 	int i, ret;
@@ -493,14 +956,15 @@
 		mmu = msm_smmu_new(sde_kms->dev->dev, i);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
-			DRM_ERROR("failed to init iommu: %d\n", ret);
+			SDE_ERROR("failed to init iommu id %d: rc: %d\n", i,
+					ret);
 			goto fail;
 		}
 
 		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret) {
-			DRM_ERROR("failed to attach iommu: %d\n", ret);
+			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
 			mmu->funcs->destroy(mmu);
 			goto fail;
 		}
@@ -508,7 +972,8 @@
 		sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
 		if (sde_kms->mmu_id[i] < 0) {
 			ret = sde_kms->mmu_id[i];
-			DRM_ERROR("failed to register sde iommu: %d\n", ret);
+			SDE_ERROR("failed to register sde iommu %d: %d\n",
+					i, ret);
 			mmu->funcs->detach(mmu, (const char **)iommu_ports,
 					ARRAY_SIZE(iommu_ports));
 			goto fail;
@@ -519,124 +984,104 @@
 
 	return 0;
 fail:
-	return ret;
+	_sde_kms_mmu_destroy(sde_kms);
 
+	return ret;
 }
 
-struct sde_kms *sde_hw_setup(struct platform_device *pdev)
+static int sde_kms_hw_init(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms;
-	int ret = 0;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int i, rc = -EINVAL;
 
-	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
-	if (!sde_kms)
-		return ERR_PTR(-ENOMEM);
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		goto end;
+	}
 
-	sde_kms->mmio = msm_ioremap(pdev, "mdp_phys", "SDE");
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev || !dev->platformdev) {
+		SDE_ERROR("invalid device\n");
+		goto end;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		SDE_ERROR("invalid private data\n");
+		goto end;
+	}
+
+	sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "SDE");
 	if (IS_ERR(sde_kms->mmio)) {
-		SDE_ERROR("mdp register memory map failed\n");
-		ret = PTR_ERR(sde_kms->mmio);
-		goto err;
+		rc = PTR_ERR(sde_kms->mmio);
+		SDE_ERROR("mdp register memory map failed: %d\n", rc);
+		sde_kms->mmio = NULL;
+		goto error;
 	}
 	DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);
 
-	sde_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif_phys", "VBIF");
+	sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev,
+			"vbif_phys", "VBIF");
 	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
-		SDE_ERROR("vbif register memory map failed\n");
-		ret = PTR_ERR(sde_kms->vbif[VBIF_RT]);
-		goto vbif_map_err;
+		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
+		SDE_ERROR("vbif register memory map failed: %d\n", rc);
+		sde_kms->vbif[VBIF_RT] = NULL;
+		goto error;
 	}
 
-	sde_kms->vbif[VBIF_NRT] = msm_ioremap(pdev, "vbif_nrt_phys",
-		"VBIF_NRT");
+	sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev,
+			"vbif_nrt_phys", "VBIF_NRT");
 	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
 		sde_kms->vbif[VBIF_NRT] = NULL;
 		SDE_DEBUG("VBIF NRT is not defined");
 	}
 
-	/* junk API - no error return for init api */
-	msm_kms_init(&sde_kms->base, &kms_funcs);
-	if (ret) {
-		SDE_ERROR("mdp/kms hw init failed\n");
-		goto kms_init_err;
-	}
-
-	SDE_DEBUG("sde hw setup successful\n");
-	return sde_kms;
-
-kms_init_err:
-	if (sde_kms->vbif[VBIF_NRT])
-		iounmap(sde_kms->vbif[VBIF_NRT]);
-	iounmap(sde_kms->vbif[VBIF_RT]);
-vbif_map_err:
-	iounmap(sde_kms->mmio);
-err:
-	kfree(sde_kms);
-	return ERR_PTR(ret);
-}
-
-static void sde_hw_destroy(struct sde_kms *sde_kms)
-{
-	if (sde_kms->vbif[VBIF_NRT])
-		iounmap(sde_kms->vbif[VBIF_NRT]);
-	iounmap(sde_kms->vbif[VBIF_RT]);
-	iounmap(sde_kms->mmio);
-	kfree(sde_kms);
-}
-
-struct msm_kms *sde_kms_init(struct drm_device *dev)
-{
-	struct msm_drm_private *priv;
-	struct sde_kms *sde_kms;
-	int i;
-	int rc;
-
-	if (!dev) {
-		SDE_ERROR("drm device node invalid\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	priv = dev->dev_private;
-	sde_kms = sde_hw_setup(dev->platformdev);
-	if (IS_ERR_OR_NULL(sde_kms)) {
-		SDE_ERROR("sde hw setup failed\n");
-		rc = PTR_ERR(sde_kms);
-		goto end;
-	}
-
-	sde_kms->dev = dev;
-	priv->kms = &sde_kms->base;
-
 	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
 	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
-		SDE_ERROR("sde power client create failed\n");
-		rc = -EINVAL;
-		goto kms_destroy;
+		rc = PTR_ERR(sde_kms->core_client);
+		SDE_ERROR("sde power client create failed: %d\n", rc);
+		sde_kms->core_client = NULL;
+		goto error;
 	}
 
 	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
 		true);
 	if (rc) {
-		SDE_ERROR("resource enable failed\n");
-		goto clk_rate_err;
+		SDE_ERROR("resource enable failed: %d\n", rc);
+		goto error;
 	}
 
-	core_hw_rev_init(sde_kms);
+	_sde_kms_core_hw_rev_init(sde_kms);
 
 	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
 
 	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
 	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
-		SDE_ERROR("catalog init failed\n");
 		rc = PTR_ERR(sde_kms->catalog);
-		goto catalog_err;
+		SDE_ERROR("catalog init failed: %d\n", rc);
+		sde_kms->catalog = NULL;
+		goto power_error;
 	}
 
 	rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
 			sde_kms->dev);
-	if (rc)
-		goto catalog_err;
+	if (rc) {
+		SDE_ERROR("rm init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	sde_kms->rm_init = true;
+
+	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
+	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
+		rc = PTR_ERR(sde_kms->hw_mdp);
+		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
+		sde_kms->hw_mdp = NULL;
+		goto power_error;
+	}
 
 	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
 		u32 vbif_idx = sde_kms->catalog->vbif[i].id;
@@ -644,43 +1089,42 @@
 		sde_kms->hw_vbif[i] = sde_hw_vbif_init(vbif_idx,
 				sde_kms->vbif[vbif_idx], sde_kms->catalog);
 		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
-			SDE_ERROR("failed to init vbif %d\n", vbif_idx);
+			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
+			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
 			sde_kms->hw_vbif[vbif_idx] = NULL;
-			goto catalog_err;
+			goto power_error;
 		}
 	}
 
-	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
-	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
-		SDE_ERROR("failed to get hw_mdp\n");
-		sde_kms->hw_mdp = NULL;
-		goto catalog_err;
-	}
-
-	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
-	if (IS_ERR_OR_NULL(sde_kms->hw_intr))
-		goto catalog_err;
-
 	/*
 	 * Now we need to read the HW catalog and initialize resources such as
 	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
 	 */
-	sde_mmu_init(sde_kms);
+	rc = _sde_kms_mmu_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
+		goto power_error;
+	}
 
 	/*
 	 * NOTE: Calling sde_debugfs_init here so that the drm_minor device for
 	 *       'primary' is already created.
 	 */
-	sde_debugfs_init(sde_kms);
-	msm_evtlog_init(&priv->evtlog, SDE_EVTLOG_SIZE,
-			sde_debugfs_get_root(sde_kms));
-	MSM_EVT(dev, 0, 0);
+	rc = _sde_debugfs_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_debugfs init failed: %d\n", rc);
+		goto power_error;
+	}
 
 	/*
-	 * modeset_init should create the DRM related objects i.e. CRTCs,
-	 * planes, encoders, connectors and so forth
+	 * _sde_kms_drm_obj_init should create the DRM related objects
+	 * i.e. CRTCs, planes, encoders, connectors and so forth
 	 */
-	modeset_init(sde_kms);
+	rc = _sde_kms_drm_obj_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("modeset init failed: %d\n", rc);
+		goto power_error;
+	}
 
 	dev->mode_config.min_width = 0;
 	dev->mode_config.min_height = 0;
@@ -697,16 +1141,47 @@
 	 */
 	dev->mode_config.allow_fb_modifiers = true;
 
+	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+		rc = PTR_ERR(sde_kms->hw_intr);
+		SDE_ERROR("hw_intr init failed: %d\n", rc);
+		sde_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
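+	/* drop the power/clock vote taken at the start of hw_init */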
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+	return 0;
+
+hw_intr_init_err:
+	_sde_kms_drm_obj_destroy(sde_kms);
+power_error:
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+error:
+	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
+end:
+	return rc;
+}
+
+struct msm_kms *sde_kms_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!dev || !dev->dev_private) {
+		SDE_ERROR("drm device node invalid\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+
+	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
+	if (!sde_kms) {
+		SDE_ERROR("failed to allocate sde kms\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	msm_kms_init(&sde_kms->base, &kms_funcs);
+	sde_kms->dev = dev;
 
 	return &sde_kms->base;
-
-catalog_err:
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-clk_rate_err:
-	sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
-kms_destroy:
-	sde_hw_destroy(sde_kms);
-end:
-	return ERR_PTR(rc);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index d1ec5c0..cd9d103 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -16,18 +16,19 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
+#include "sde_dbg.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_ctl.h"
 #include "sde_hw_lm.h"
 #include "sde_hw_interrupts.h"
 #include "sde_hw_wb.h"
 #include "sde_hw_top.h"
-#include "sde_connector.h"
-#include "sde_crtc.h"
 #include "sde_rm.h"
 #include "sde_power_handle.h"
 #include "sde_irq.h"
 
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
 /**
  * SDE_DEBUG - macro for kms/plane/crtc/encoder/connector logs
  * @fmt: Pointer to format string
@@ -80,10 +81,12 @@
 
 /*
  * struct sde_irq_callback - IRQ callback handlers
+ * @list: list node used to link this callback into the IRQ's callback list
  * @func: intr handler
  * @arg: argument for the handler
  */
 struct sde_irq_callback {
+	struct list_head list;
 	void (*func)(void *arg, int irq_idx);
 	void *arg;
 };
@@ -92,26 +95,17 @@
  * struct sde_irq: IRQ structure contains callback registration info
  * @total_irq:    total number of irq_idx obtained from HW interrupts mapping
  * @irq_cb_tbl:   array of IRQ callbacks setting
+ * @enable_counts: array of IRQ enable reference counts
+ * @irq_counts:   array of per-IRQ trigger counts for statistics
  * @cb_lock:      callback lock
+ * @debugfs_file: debugfs file for irq statistics
  */
 struct sde_irq {
 	u32 total_irqs;
-	struct sde_irq_callback *irq_cb_tbl;
+	struct list_head *irq_cb_tbl;
+	atomic_t *enable_counts;
+	atomic_t *irq_counts;
 	spinlock_t cb_lock;
-};
-
-/**
- * Encoder functions and data types
- * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
- * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
- * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
- * @display_num_of_h_tiles:
- */
-struct sde_encoder_hw_resources {
-	enum sde_intf_mode intfs[INTF_MAX];
-	enum sde_intf_mode wbs[WB_MAX];
-	bool needs_cdm;
-	u32 display_num_of_h_tiles;
+	struct dentry *debugfs_file;
 };
 
 struct sde_kms {
@@ -126,6 +120,9 @@
 
 	/* directory entry for debugfs */
 	void *debugfs_root;
+	struct dentry *debugfs_debug;
+	struct dentry *debugfs_danger;
+	struct dentry *debugfs_vbif;
 
 	/* io/register spaces: */
 	void __iomem *mmio, *vbif[VBIF_MAX];
@@ -140,9 +137,16 @@
 	struct sde_irq irq_obj;
 
 	struct sde_rm rm;
+	bool rm_init;
 
 	struct sde_hw_vbif *hw_vbif[VBIF_MAX];
 	struct sde_hw_mdp *hw_mdp;
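+	/* displays enumerated from the dsi and writeback drivers */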
+	int dsi_display_count;
+	void **dsi_displays;
+	int wb_display_count;
+	void **wb_displays;
+
+	bool has_danger_ctrl;
 };
 
 struct vsync_info {
@@ -152,42 +156,6 @@
 
 #define to_sde_kms(x) container_of(x, struct sde_kms, base)
 
-struct sde_plane_state {
-	struct drm_plane_state base;
-
-	/* aligned with property */
-	uint64_t property_values[PLANE_PROP_COUNT];
-
-	/* blob properties */
-	struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
-
-	/* dereferenced input fence pointer */
-	void *input_fence;
-
-	/* assigned by crtc blender */
-	enum sde_stage stage;
-
-	/* bitmask for which pipe h/w config functions need to be updated */
-	uint32_t dirty;
-
-	/* whether the current update is still pending */
-	bool pending : 1;
-};
-
-#define to_sde_plane_state(x) \
-	container_of(x, struct sde_plane_state, base)
-
-/**
- * sde_plane_get_property - Query integer value of plane property
- *
- * @S: Pointer to plane state
- * @X: Property index, from enum msm_mdp_plane_property
- *
- * Return: Integer value of requested property
- */
-#define sde_plane_get_property(S, X) \
-	((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
-
 /**
  * sde_is_custom_client - whether or not to enable non-standard customizations
  *
@@ -391,82 +359,4 @@
 int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
-/**
- * Plane functions
- */
-enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
-void sde_plane_flush(struct drm_plane *plane);
-struct drm_plane *sde_plane_init(struct drm_device *dev,
-		uint32_t pipe, bool primary_plane,
-		unsigned long possible_crtcs);
-
-/**
- * sde_plane_wait_input_fence - wait for input fence object
- * @plane:   Pointer to DRM plane object
- * @wait_ms: Wait timeout value
- * Returns: Zero on success
- */
-int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
-
-/**
- * sde_plane_color_fill - Enables color fill on plane
- * @plane:  Pointer to DRM plane object
- * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
- * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
- *
- * Returns: 0 on success
- */
-int sde_plane_color_fill(struct drm_plane *plane,
-		uint32_t color, uint32_t alpha);
-
-/**
- * sde_encoder_get_hw_resources - Populate table of required hardware resources
- * @encoder:	encoder pointer
- * @hw_res:	resource table to populate with encoder required resources
- * @conn_state:	report hw reqs based on this proposed connector state
- */
-void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
-		struct sde_encoder_hw_resources *hw_res,
-		struct drm_connector_state *conn_state);
-
-/**
- * sde_encoder_register_vblank_callback - provide callback to encoder that
- *	will be called on the next vblank.
- * @encoder:	encoder pointer
- * @cb:		callback pointer, provide NULL to deregister and disable IRQs
- * @data:	user data provided to callback
- */
-void sde_encoder_register_vblank_callback(struct drm_encoder *encoder,
-		void (*cb)(void *), void *data);
-
-/**
- * sde_encoder_schedule_kickoff - Register a callback with the encoder to
- *	trigger a double buffer flip of the ctl path (i.e. ctl flush and start)
- *	at the appropriate time.
- *	Immediately: if no previous commit is outstanding.
- *	Delayed: Save the callback, and return. Does not block. Callback will
- *	be triggered later. E.g. cmd encoder will trigger at pp_done irq
- *	irq if it outstanding.
- * @encoder:	encoder pointer
- */
-void sde_encoder_schedule_kickoff(struct drm_encoder *encoder);
-
-/**
- * sde_encoder_wait_nxt_committed - Wait for hardware to have flushed the
- *	current pending frames to hardware at a vblank or ctl_start
- *	Encoders will map this differently depending on irqs
- *	vid mode -> vsync_irq
- * @encoder:	encoder pointer
- *
- * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
- */
-int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
-
-/**
- * sde_encoders_init - query platform, create all encoders and bridges,
- *	and register them with the drm_device
- * @dev:	drm device pointer
- */
-void sde_encoders_init(struct drm_device *dev);
-
 #endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 5257b8d..8cf15c5 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -14,6 +14,7 @@
 
 #include <linux/debugfs.h>
 #include <uapi/drm/sde_drm.h>
+#include <uapi/drm/msm_drm_pp.h>
 
 #include "msm_prop.h"
 
@@ -24,6 +25,8 @@
 #include "sde_trace.h"
 #include "sde_crtc.h"
 #include "sde_vbif.h"
+#include "sde_plane.h"
+#include "sde_color_processing.h"
 
 #define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
 		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
@@ -86,7 +89,7 @@
 	struct sde_hw_pipe *pipe_hw;
 	struct sde_hw_pipe_cfg pipe_cfg;
 	struct sde_hw_sharp_cfg sharp_cfg;
-	struct sde_hw_scaler3_cfg scaler3_cfg;
+	struct sde_hw_scaler3_cfg *scaler3_cfg;
 	struct sde_hw_pipe_qos_cfg pipe_qos_cfg;
 	uint32_t color_fill;
 	bool is_error;
@@ -220,36 +223,6 @@
 }
 
 /**
- * _sde_plane_is_rt_pipe - check if the given plane requires real-time QoS
- * @plane:		Pointer to drm plane
- * @crtc:		Pointer to drm crtc associated with the given plane
- */
-static bool _sde_plane_is_rt_pipe(struct drm_plane *plane,
-		struct drm_crtc *crtc)
-{
-	struct sde_plane *psde = to_sde_plane(plane);
-	struct drm_connector *connector;
-	bool is_rt = false;
-
-	/* check if this plane has a physical connector interface */
-	mutex_lock(&plane->dev->mode_config.mutex);
-	drm_for_each_connector(connector, plane->dev)
-		if (connector->state &&
-				(connector->state->crtc == crtc) &&
-				(connector->connector_type
-					!= DRM_MODE_CONNECTOR_VIRTUAL)) {
-			is_rt = true;
-			break;
-		}
-	mutex_unlock(&plane->dev->mode_config.mutex);
-
-	SDE_DEBUG("plane%u: pnum:%d rt:%d\n",
-			plane->base.id, psde->pipe - SSPP_VIG0, is_rt);
-
-	return is_rt;
-}
-
-/**
  * _sde_plane_set_qos_lut - set QoS LUT of the given plane
  * @plane:		Pointer to drm plane
  * @fb:			Pointer to framebuffer associated with the given plane
@@ -420,18 +393,52 @@
 		psde->pipe_qos_cfg.danger_safe_en = false;
 	}
 
-	SDE_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x]\n",
+	SDE_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
 		plane->base.id,
 		psde->pipe - SSPP_VIG0,
 		psde->pipe_qos_cfg.danger_safe_en,
 		psde->pipe_qos_cfg.vblank_en,
 		psde->pipe_qos_cfg.creq_vblank,
-		psde->pipe_qos_cfg.danger_vblank);
+		psde->pipe_qos_cfg.danger_vblank,
+		psde->is_rt_pipe);
 
 	psde->pipe_hw->ops.setup_qos_ctrl(psde->pipe_hw,
 			&psde->pipe_qos_cfg);
 }
 
+int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+	struct sde_plane *psde;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	psde = to_sde_plane(plane);
+
+	if (!psde->is_rt_pipe)
+		goto end;
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
+	_sde_plane_set_qos_ctrl(plane, enable, SDE_PLANE_QOS_PANIC_CTRL);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+end:
+	return 0;
+}
+
 /**
  * _sde_plane_set_ot_limit - set OT limit for the given plane
  * @plane:		Pointer to drm plane
@@ -519,10 +526,7 @@
 			prefix = sde_sync_get_name_prefix(input_fence);
 			ret = sde_sync_wait(input_fence, wait_ms);
 
-			MSM_EVT(plane->dev,
-				plane->base.id,
-				(uint64_t)-ret << (sizeof(uint32_t) * CHAR_BIT)
-				| prefix);
+			SDE_EVT32(DRMID(plane), -ret, prefix);
 
 			switch (ret) {
 			case 0:
@@ -576,6 +580,29 @@
 		psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg);
 }
 
+static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde,
+		struct sde_plane_state *pstate)
+{
+	struct sde_hw_scaler3_cfg *cfg = psde->scaler3_cfg;
+	int ret = 0;
+
+	cfg->dir_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->dir_len,
+			PLANE_PROP_SCALER_LUT_ED);
+	cfg->cir_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->cir_len,
+			PLANE_PROP_SCALER_LUT_CIR);
+	cfg->sep_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->sep_len,
+			PLANE_PROP_SCALER_LUT_SEP);
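+	/* all three LUT blobs must be present; caller falls back to defaults */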
+	if (!cfg->dir_lut || !cfg->cir_lut || !cfg->sep_lut)
+		ret = -ENODATA;
+	return ret;
+}
+
 static void _sde_plane_setup_scaler3(struct sde_plane *psde,
 		uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
 		struct sde_hw_scaler3_cfg *scale_cfg,
@@ -748,6 +775,20 @@
 		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
 		{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
 	};
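+	/* BT.601 limited-range YUV to RGB matrix for the 10-bit CSC block */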
+	static const struct sde_csc_cfg sde_csc10_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+			},
+		/* signed bias */
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+	};
 
 	if (!psde) {
 		SDE_ERROR("invalid plane\n");
@@ -757,6 +798,8 @@
 	/* revert to kernel default if override not available */
 	if (psde->csc_usr_ptr)
 		psde->csc_ptr = psde->csc_usr_ptr;
+	else if (BIT(SDE_SSPP_CSC_10BIT) & psde->features)
+		psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc10_YUV2RGB_601L;
 	else
 		psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc_YUV2RGB_601L;
 
@@ -766,13 +809,66 @@
 			psde->csc_ptr->csc_mv[2]);
 }
 
+static void sde_color_process_plane_setup(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	uint32_t hue, saturation, value, contrast;
+	struct drm_msm_memcol *memcol = NULL;
+	size_t memcol_sz = 0;
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(plane->state);
+
+	hue = (uint32_t) sde_plane_get_property(pstate, PLANE_PROP_HUE_ADJUST);
+	if (psde->pipe_hw->ops.setup_pa_hue)
+		psde->pipe_hw->ops.setup_pa_hue(psde->pipe_hw, &hue);
+	saturation = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_SATURATION_ADJUST);
+	if (psde->pipe_hw->ops.setup_pa_sat)
+		psde->pipe_hw->ops.setup_pa_sat(psde->pipe_hw, &saturation);
+	value = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_VALUE_ADJUST);
+	if (psde->pipe_hw->ops.setup_pa_val)
+		psde->pipe_hw->ops.setup_pa_val(psde->pipe_hw, &value);
+	contrast = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_CONTRAST_ADJUST);
+	if (psde->pipe_hw->ops.setup_pa_cont)
+		psde->pipe_hw->ops.setup_pa_cont(psde->pipe_hw, &contrast);
+
+	if (psde->pipe_hw->ops.setup_pa_memcolor) {
+		/* Skin memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_SKIN_COLOR);
+		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+					MEMCOLOR_SKIN, memcol);
+
+		/* Sky memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_SKY_COLOR);
+		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+					MEMCOLOR_SKY, memcol);
+
+		/* Foliage memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_FOLIAGE_COLOR);
+		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+					MEMCOLOR_FOLIAGE, memcol);
+	}
+}
+
 static void _sde_plane_setup_scaler(struct sde_plane *psde,
 		const struct sde_format *fmt,
 		struct sde_plane_state *pstate)
 {
 	struct sde_hw_pixel_ext *pe;
 	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
-	uint32_t tmp, i;
 
 	if (!psde || !fmt) {
 		SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
@@ -795,49 +891,53 @@
 
 	/* update scaler */
 	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
-		if (!psde->pixel_ext_usr) {
+		int error;
+
+		error = _sde_plane_setup_scaler3_lut(psde, pstate);
+		if (error || !psde->pixel_ext_usr) {
 			/* calculate default config for QSEED3 */
 			_sde_plane_setup_scaler3(psde,
 					psde->pipe_cfg.src_rect.w,
 					psde->pipe_cfg.src_rect.h,
 					psde->pipe_cfg.dst_rect.w,
 					psde->pipe_cfg.dst_rect.h,
-					&psde->scaler3_cfg, fmt,
+					psde->scaler3_cfg, fmt,
 					chroma_subsmpl_h, chroma_subsmpl_v);
 		}
 	} else if (!psde->pixel_ext_usr) {
+		uint32_t deci_dim, i;
+
 		/* calculate default configuration for QSEED2 */
 		memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
 
 		SDE_DEBUG_PLANE(psde, "default config\n");
+		deci_dim = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.w,
+				psde->pipe_cfg.horz_decimation);
 		_sde_plane_setup_scaler2(psde,
-				psde->pipe_cfg.src_rect.w,
+				deci_dim,
 				psde->pipe_cfg.dst_rect.w,
 				pe->phase_step_x,
 				pe->horz_filter, fmt, chroma_subsmpl_h);
-		_sde_plane_setup_scaler2(psde,
-				psde->pipe_cfg.src_rect.h,
-				psde->pipe_cfg.dst_rect.h,
-				pe->phase_step_y,
-				pe->vert_filter, fmt, chroma_subsmpl_v);
 
-		/* calculate left/right/top/bottom pixel extensions */
-		tmp = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.w,
-				psde->pipe_cfg.horz_decimation);
 		if (SDE_FORMAT_IS_YUV(fmt))
-			tmp &= ~0x1;
+			deci_dim &= ~0x1;
 		_sde_plane_setup_pixel_ext(psde, psde->pipe_cfg.src_rect.w,
-				psde->pipe_cfg.dst_rect.w, tmp,
+				psde->pipe_cfg.dst_rect.w, deci_dim,
 				pe->phase_step_x,
 				pe->roi_w,
 				pe->num_ext_pxls_left,
 				pe->num_ext_pxls_right, pe->horz_filter, fmt,
 				chroma_subsmpl_h, 0);
 
-		tmp = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.h,
+		deci_dim = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.h,
 				psde->pipe_cfg.vert_decimation);
+		_sde_plane_setup_scaler2(psde,
+				deci_dim,
+				psde->pipe_cfg.dst_rect.h,
+				pe->phase_step_y,
+				pe->vert_filter, fmt, chroma_subsmpl_v);
 		_sde_plane_setup_pixel_ext(psde, psde->pipe_cfg.src_rect.h,
-				psde->pipe_cfg.dst_rect.h, tmp,
+				psde->pipe_cfg.dst_rect.h, deci_dim,
 				pe->phase_step_y,
 				pe->roi_h,
 				pe->num_ext_pxls_top,
@@ -917,7 +1017,8 @@
 
 		if (psde->pipe_hw->ops.setup_rects)
 			psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
-					&psde->pipe_cfg, &psde->pixel_ext);
+					&psde->pipe_cfg, &psde->pixel_ext,
+					psde->scaler3_cfg);
 	}
 
 	return 0;
@@ -961,6 +1062,7 @@
 	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
 		switch (idx) {
 		case PLANE_PROP_SCALER_V1:
+		case PLANE_PROP_SCALER_V2:
 		case PLANE_PROP_H_DECIMATE:
 		case PLANE_PROP_V_DECIMATE:
 		case PLANE_PROP_SRC_CONFIG:
@@ -1001,7 +1103,7 @@
 		return 0;
 	pstate->pending = true;
 
-	psde->is_rt_pipe = _sde_plane_is_rt_pipe(plane, crtc);
+	psde->is_rt_pipe = sde_crtc_is_rt(crtc);
 	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
 
 	/* update roi config */
@@ -1041,7 +1143,8 @@
 			_sde_plane_setup_scaler(psde, fmt, pstate);
 
 			psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
-					&psde->pipe_cfg, &psde->pixel_ext);
+					&psde->pipe_cfg, &psde->pixel_ext,
+					psde->scaler3_cfg);
 		}
 	}
 
@@ -1067,6 +1170,8 @@
 			psde->csc_ptr = 0;
 	}
 
+	sde_color_process_plane_setup(plane);
+
 	/* update sharpening */
 	if ((pstate->dirty & SDE_PLANE_DIRTY_SHARPEN) &&
 		psde->pipe_hw->ops.setup_sharpening) {
@@ -1256,7 +1361,8 @@
 
 	if (SDE_FORMAT_IS_YUV(fmt) &&
 		(!(psde->features & SDE_SSPP_SCALER) ||
-		 !(psde->features & BIT(SDE_SSPP_CSC)))) {
+		 !(psde->features & (BIT(SDE_SSPP_CSC)
+		 | BIT(SDE_SSPP_CSC_10BIT))))) {
 		SDE_ERROR_PLANE(psde,
 				"plane doesn't have scaler/csc for yuv\n");
 		ret = -EINVAL;
@@ -1414,6 +1520,7 @@
 	struct sde_plane *psde = to_sde_plane(plane);
 	int zpos_max = 255;
 	int zpos_def = 0;
+	char feature_name[256];
 
 	if (!plane || !psde) {
 		SDE_ERROR("invalid plane\n");
@@ -1428,8 +1535,12 @@
 	}
 
 	if (sde_is_custom_client()) {
-		if (catalog->mixer_count && catalog->mixer)
-			zpos_max = catalog->mixer[0].sblk->maxblendstages;
+		if (catalog->mixer_count && catalog->mixer &&
+				catalog->mixer[0].sblk->maxblendstages) {
+			zpos_max = catalog->mixer[0].sblk->maxblendstages - 1;
+			if (zpos_max > SDE_STAGE_MAX - SDE_STAGE_0 - 1)
+				zpos_max = SDE_STAGE_MAX - SDE_STAGE_0 - 1;
+		}
 	} else if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
 		/* reserve zpos == 0 for primary planes */
 		zpos_def = drm_plane_index(plane) + 1;
@@ -1457,7 +1568,16 @@
 				PLANE_PROP_V_DECIMATE);
 	}
 
-	if (psde->features & SDE_SSPP_SCALER) {
+	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"scaler_v2", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
+		msm_property_install_blob(&psde->property_info, "lut_ed", 0,
+			PLANE_PROP_SCALER_LUT_ED);
+		msm_property_install_blob(&psde->property_info, "lut_cir", 0,
+			PLANE_PROP_SCALER_LUT_CIR);
+		msm_property_install_blob(&psde->property_info, "lut_sep", 0,
+			PLANE_PROP_SCALER_LUT_SEP);
+	} else if (psde->features & SDE_SSPP_SCALER) {
 		msm_property_install_volatile_range(&psde->property_info,
 			"scaler_v1", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V1);
 	}
@@ -1467,6 +1587,33 @@
 			"csc_v1", 0x0, 0, ~0, 0, PLANE_PROP_CSC_V1);
 	}
 
+	if (psde->features & BIT(SDE_SSPP_HSIC)) {
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_HUE_V",
+			psde->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_HUE_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SATURATION_V",
+			psde->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_SATURATION_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_VALUE_V",
+			psde->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_VALUE_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_CONTRAST_V",
+			psde->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_CONTRAST_ADJUST);
+	}
+
 	/* standard properties */
 	msm_property_install_rotation(&psde->property_info,
 		BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y), PLANE_PROP_ROTATION);
@@ -1517,6 +1664,24 @@
 			info->data, info->len, PLANE_PROP_INFO);
 
 	kfree(info);
+
+	if (psde->features & BIT(SDE_SSPP_MEMCOLOR)) {
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SKIN_COLOR_V",
+			psde->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_SKIN_COLOR);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SKY_COLOR_V",
+			psde->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_SKY_COLOR);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_FOLIAGE_COLOR_V",
+			psde->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_FOLIAGE_COLOR);
+	}
 }
 
 static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr)
@@ -1589,21 +1754,107 @@
 		pe->vert_filter[i] = scale_v1.vert_filter[i];
 	}
 	for (i = 0; i < SDE_MAX_PLANES; i++) {
-		pe->num_ext_pxls_left[i] = scale_v1.lr.num_pxls_start[i];
-		pe->num_ext_pxls_right[i] = scale_v1.lr.num_pxls_end[i];
-		pe->left_ftch[i] = scale_v1.lr.ftch_start[i];
-		pe->right_ftch[i] = scale_v1.lr.ftch_end[i];
-		pe->left_rpt[i] = scale_v1.lr.rpt_start[i];
-		pe->right_rpt[i] = scale_v1.lr.rpt_end[i];
-		pe->roi_w[i] = scale_v1.lr.roi[i];
+		pe->left_ftch[i] = scale_v1.pe.left_ftch[i];
+		pe->right_ftch[i] = scale_v1.pe.right_ftch[i];
+		pe->left_rpt[i] = scale_v1.pe.left_rpt[i];
+		pe->right_rpt[i] = scale_v1.pe.right_rpt[i];
+		pe->roi_w[i] = scale_v1.pe.num_ext_pxls_lr[i];
 
-		pe->num_ext_pxls_top[i] = scale_v1.tb.num_pxls_start[i];
-		pe->num_ext_pxls_btm[i] = scale_v1.tb.num_pxls_end[i];
-		pe->top_ftch[i] = scale_v1.tb.ftch_start[i];
-		pe->btm_ftch[i] = scale_v1.tb.ftch_end[i];
-		pe->top_rpt[i] = scale_v1.tb.rpt_start[i];
-		pe->btm_rpt[i] = scale_v1.tb.rpt_end[i];
-		pe->roi_h[i] = scale_v1.tb.roi[i];
+		pe->top_ftch[i] = scale_v1.pe.top_ftch[i];
+		pe->btm_ftch[i] = scale_v1.pe.btm_ftch[i];
+		pe->top_rpt[i] = scale_v1.pe.top_rpt[i];
+		pe->btm_rpt[i] = scale_v1.pe.btm_rpt[i];
+		pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i];
+	}
+
+	psde->pixel_ext_usr = true;
+
+	SDE_DEBUG_PLANE(psde, "user property data copied\n");
+}
+
+static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde,
+		struct sde_plane_state *pstate, void *usr)
+{
+	struct sde_drm_scaler_v2 scale_v2;
+	struct sde_hw_pixel_ext *pe;
+	int i;
+	struct sde_hw_scaler3_cfg *cfg;
+
+	if (!psde) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	cfg = psde->scaler3_cfg;
+	psde->pixel_ext_usr = false;
+	if (!usr) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&scale_v2, usr, sizeof(scale_v2))) {
+		SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
+		return;
+	}
+
+	/* populate from user space */
+	pe = &(psde->pixel_ext);
+	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+	cfg->enable = scale_v2.enable;
+	cfg->dir_en = scale_v2.dir_en;
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		cfg->init_phase_x[i] = scale_v2.init_phase_x[i];
+		cfg->phase_step_x[i] = scale_v2.phase_step_x[i];
+		cfg->init_phase_y[i] = scale_v2.init_phase_y[i];
+		cfg->phase_step_y[i] = scale_v2.phase_step_y[i];
+
+		cfg->preload_x[i] = scale_v2.preload_x[i];
+		cfg->preload_y[i] = scale_v2.preload_y[i];
+		cfg->src_width[i] = scale_v2.src_width[i];
+		cfg->src_height[i] = scale_v2.src_height[i];
+	}
+	cfg->dst_width = scale_v2.dst_width;
+	cfg->dst_height = scale_v2.dst_height;
+
+	cfg->y_rgb_filter_cfg = scale_v2.y_rgb_filter_cfg;
+	cfg->uv_filter_cfg = scale_v2.uv_filter_cfg;
+	cfg->alpha_filter_cfg = scale_v2.alpha_filter_cfg;
+	cfg->blend_cfg = scale_v2.blend_cfg;
+
+	cfg->lut_flag = scale_v2.lut_flag;
+	cfg->dir_lut_idx = scale_v2.dir_lut_idx;
+	cfg->y_rgb_cir_lut_idx = scale_v2.y_rgb_cir_lut_idx;
+	cfg->uv_cir_lut_idx = scale_v2.uv_cir_lut_idx;
+	cfg->y_rgb_sep_lut_idx = scale_v2.y_rgb_sep_lut_idx;
+	cfg->uv_sep_lut_idx = scale_v2.uv_sep_lut_idx;
+
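+	/* detail enhancer (DE) configuration */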
+	cfg->de.enable = scale_v2.de.enable;
+	cfg->de.sharpen_level1 = scale_v2.de.sharpen_level1;
+	cfg->de.sharpen_level2 = scale_v2.de.sharpen_level2;
+	cfg->de.clip = scale_v2.de.clip;
+	cfg->de.limit = scale_v2.de.limit;
+	cfg->de.thr_quiet = scale_v2.de.thr_quiet;
+	cfg->de.thr_dieout = scale_v2.de.thr_dieout;
+	cfg->de.thr_low = scale_v2.de.thr_low;
+	cfg->de.thr_high = scale_v2.de.thr_high;
+	cfg->de.prec_shift = scale_v2.de.prec_shift;
+	for (i = 0; i < SDE_MAX_DE_CURVES; i++) {
+		cfg->de.adjust_a[i] = scale_v2.de.adjust_a[i];
+		cfg->de.adjust_b[i] = scale_v2.de.adjust_b[i];
+		cfg->de.adjust_c[i] = scale_v2.de.adjust_c[i];
+	}
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		pe->left_ftch[i] = scale_v2.pe.left_ftch[i];
+		pe->right_ftch[i] = scale_v2.pe.right_ftch[i];
+		pe->left_rpt[i] = scale_v2.pe.left_rpt[i];
+		pe->right_rpt[i] = scale_v2.pe.right_rpt[i];
+		pe->roi_w[i] = scale_v2.pe.num_ext_pxls_lr[i];
+
+		pe->top_ftch[i] = scale_v2.pe.top_ftch[i];
+		pe->btm_ftch[i] = scale_v2.pe.btm_ftch[i];
+		pe->top_rpt[i] = scale_v2.pe.top_rpt[i];
+		pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i];
+		pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i];
 	}
 	psde->pixel_ext_usr = true;
 
@@ -1642,6 +1893,10 @@
 			case PLANE_PROP_SCALER_V1:
 				_sde_plane_set_scaler_v1(psde, (void *)val);
 				break;
+			case PLANE_PROP_SCALER_V2:
+				_sde_plane_set_scaler_v2(psde, pstate,
+					(void *)val);
+				break;
 			default:
 				/* nothing to do */
 				break;
@@ -1847,6 +2102,98 @@
 	return plane ? to_sde_plane(plane)->pipe : SSPP_NONE;
 }
 
+static ssize_t _sde_plane_danger_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_kms *kms = file->private_data;
+	struct sde_mdss_cfg *cfg = kms->catalog;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
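+	/* report 1 when danger/panic signalling is currently disabled */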
+	len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static void _sde_plane_set_danger_state(struct sde_kms *kms, bool enable)
+{
+	struct drm_plane *plane;
+
+	drm_for_each_plane(plane, kms->dev) {
+		if (plane->fb && plane->state) {
+			sde_plane_danger_signal_ctrl(plane, enable);
+			SDE_DEBUG("plane:%d img:%dx%d ",
+				plane->base.id, plane->fb->width,
+				plane->fb->height);
+			SDE_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				plane->state->src_x >> 16,
+				plane->state->src_y >> 16,
+				plane->state->src_w >> 16,
+				plane->state->src_h >> 16,
+				plane->state->crtc_x, plane->state->crtc_y,
+				plane->state->crtc_w, plane->state->crtc_h);
+		} else {
+			SDE_DEBUG("Inactive plane:%d\n", plane->base.id);
+		}
+	}
+}
+
+static ssize_t _sde_plane_danger_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_kms *kms = file->private_data;
+	struct sde_mdss_cfg *cfg = kms->catalog;
+	int disable_panic;
+	char buf[10];
+
+	if (!cfg)
+		return -EFAULT;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtoint(buf, 0, &disable_panic))
+		return -EFAULT;
+
+	if (disable_panic) {
+		/* Disable panic signal for all active pipes */
+		SDE_DEBUG("Disabling danger:\n");
+		_sde_plane_set_danger_state(kms, false);
+		kms->has_danger_ctrl = false;
+	} else {
+		/* Enable panic signal for all active pipes */
+		SDE_DEBUG("Enabling danger:\n");
+		kms->has_danger_ctrl = true;
+		_sde_plane_set_danger_state(kms, true);
+	}
+
+	return count;
+}
+
+static const struct file_operations sde_plane_danger_enable = {
+	.open = simple_open,
+	.read = _sde_plane_danger_read,
+	.write = _sde_plane_danger_write,
+};
+
 static void _sde_plane_init_debugfs(struct sde_plane *psde, struct sde_kms *kms)
 {
 	const struct sde_sspp_sub_blks *sblk = 0;
@@ -1889,6 +2236,28 @@
 					kms);
 			sde_debugfs_create_regset32("csc_blk", 0444,
 					psde->debugfs_root, &psde->debugfs_csc);
+
+			debugfs_create_u32("xin_id",
+					0444,
+					psde->debugfs_root,
+					(u32 *) &cfg->xin_id);
+			debugfs_create_u32("clk_ctrl",
+					0444,
+					psde->debugfs_root,
+					(u32 *) &cfg->clk_ctrl);
+			debugfs_create_x32("creq_vblank",
+					0644,
+					psde->debugfs_root,
+					(u32 *) &sblk->creq_vblank);
+			debugfs_create_x32("danger_vblank",
+					0644,
+					psde->debugfs_root,
+					(u32 *) &sblk->danger_vblank);
+
+			debugfs_create_file("disable_danger",
+					0644,
+					psde->debugfs_root,
+					kms, &sde_plane_danger_enable);
 		}
 	}
 }
@@ -1959,6 +2328,17 @@
 		goto clean_sspp;
 	}
 
+	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+		psde->scaler3_cfg = kzalloc(sizeof(struct sde_hw_scaler3_cfg),
+			GFP_KERNEL);
+		if (!psde->scaler3_cfg) {
+			SDE_ERROR("[%u]failed to allocate scale struct\n",
+				pipe);
+			ret = -ENOMEM;
+			goto clean_sspp;
+		}
+	}
+
 	/* add plane to DRM framework */
 	psde->nformats = sde_populate_formats(psde->pipe_sblk->format_list,
 			psde->formats,
@@ -2004,6 +2384,9 @@
 clean_sspp:
 	if (psde && psde->pipe_hw)
 		sde_hw_sspp_destroy(psde->pipe_hw);
+
+	if (psde && psde->scaler3_cfg)
+		kfree(psde->scaler3_cfg);
 clean_plane:
 	kfree(psde);
 exit:
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
new file mode 100644
index 0000000..e756e25
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_PLANE_H_
+#define _SDE_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "msm_prop.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * struct sde_plane_state: Define sde extension of drm plane state object
+ * @base:	base drm plane state object
+ * @property_values:	cached plane property values
+ * @property_blobs:	blob properties
+ * @input_fence:	dereferenced input fence pointer
+ * @stage:	assigned by crtc blender
+ * @dirty:	bitmask for which pipe h/w config functions need to be updated
+ * @pending:	whether the current update is still pending
+ */
+struct sde_plane_state {
+	struct drm_plane_state base;
+	uint64_t property_values[PLANE_PROP_COUNT];
+	struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+	void *input_fence;
+	enum sde_stage stage;
+	uint32_t dirty;
+	bool pending;
+};
+
+#define to_sde_plane_state(x) \
+	container_of(x, struct sde_plane_state, base)
+
+/**
+ * sde_plane_get_property - Query integer value of plane property
+ * @S: Pointer to plane state
+ * @X: Property index, from enum msm_mdp_plane_property
+ * Returns: Integer value of requested property
+ */
+#define sde_plane_get_property(S, X) \
+	((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+
+/**
+ * sde_plane_pipe - return sspp identifier for the given plane
+ * @plane:   Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
+
+/**
+ * sde_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void sde_plane_flush(struct drm_plane *plane);
+
+/**
+ * sde_plane_init - create new sde plane for the given pipe
+ * @dev:   Pointer to DRM device
+ * @pipe:  sde hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ */
+struct drm_plane *sde_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs);
+
+/**
+ * sde_plane_wait_input_fence - wait for input fence object
+ * @plane:   Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * sde_plane_color_fill - enables color fill on plane
+ * @plane:  Pointer to DRM plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int sde_plane_color_fill(struct drm_plane *plane,
+		uint32_t color, uint32_t alpha);
+
+#endif /* _SDE_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index b17ac82..1d27b27 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -21,6 +21,8 @@
 #include "sde_hw_pingpong.h"
 #include "sde_hw_intf.h"
 #include "sde_hw_wb.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
 
 #define RESERVED_BY_OTHER(h, r) \
 	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
@@ -90,29 +92,50 @@
 	void *hw;
 };
 
-static void _sde_rm_print_rsvps(struct sde_rm *rm, const char *msg)
+/**
+ * sde_rm_dbg_rsvp_stage - stages of the reservation sequence, used for event logging
+ */
+enum sde_rm_dbg_rsvp_stage {
+	SDE_RM_STAGE_BEGIN,
+	SDE_RM_STAGE_AFTER_CLEAR,
+	SDE_RM_STAGE_AFTER_RSVPNEXT,
+	SDE_RM_STAGE_FINAL
+};
+
+static void _sde_rm_print_rsvps(
+		struct sde_rm *rm,
+		enum sde_rm_dbg_rsvp_stage stage)
 {
 	struct sde_rm_rsvp *rsvp;
 	struct sde_rm_hw_blk *blk;
 	enum sde_hw_blk_type type;
 
-	SDE_DEBUG("%s\n", msg);
+	SDE_DEBUG("%d\n", stage);
 
-	list_for_each_entry(rsvp, &rm->rsvps, list)
-		SDE_DEBUG("%s rsvp[s%ue%u] topology %d\n", msg, rsvp->seq,
+	list_for_each_entry(rsvp, &rm->rsvps, list) {
+		SDE_DEBUG("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
 				rsvp->enc_id, rsvp->topology);
+		SDE_EVT32(stage, rsvp->seq, rsvp->enc_id, rsvp->topology);
+	}
 
 	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
 		list_for_each_entry(blk, &rm->hw_blks[type], list) {
 			if (!blk->rsvp && !blk->rsvp_nxt)
 				continue;
 
-			SDE_DEBUG("%s rsvp[s%ue%u->s%ue%u] %s %d\n", msg,
+			SDE_DEBUG("%d rsvp[s%ue%u->s%ue%u] %s %d\n", stage,
 				(blk->rsvp) ? blk->rsvp->seq : 0,
 				(blk->rsvp) ? blk->rsvp->enc_id : 0,
 				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
 				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
 				blk->type_name, blk->id);
+
+			SDE_EVT32(stage,
+				(blk->rsvp) ? blk->rsvp->seq : 0,
+				(blk->rsvp) ? blk->rsvp->enc_id : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+				blk->type, blk->id);
 		}
 	}
 }
@@ -569,7 +592,7 @@
 	struct sde_rm_hw_blk *pp[MAX_BLOCKS];
 	struct sde_rm_hw_iter iter_i, iter_j;
 	int lm_count = 0;
-	int i;
+	int i, rc = 0;
 
 	if (!reqs->num_lm) {
 		SDE_ERROR("invalid number of lm: %d\n", reqs->num_lm);
@@ -621,16 +644,34 @@
 
 		lm[i]->rsvp_nxt = rsvp;
 		pp[i]->rsvp_nxt = rsvp;
-		MSM_EVTMSG(rm->dev, lm[i]->type_name, rsvp->enc_id, lm[i]->id);
-		MSM_EVTMSG(rm->dev, pp[i]->type_name, rsvp->enc_id, pp[i]->id);
-		if (dspp[i]) {
+		if (dspp[i])
 			dspp[i]->rsvp_nxt = rsvp;
-			MSM_EVTMSG(rm->dev, dspp[i]->type_name, rsvp->enc_id,
-					dspp[i]->id);
+
+		SDE_EVT32(lm[i]->type, rsvp->enc_id, lm[i]->id, pp[i]->id,
+				dspp[i] ? dspp[i]->id : 0);
+	}
+
+	if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
+		/* reserve a free PINGPONG_SLAVE block */
+		rc = -ENAVAIL;
+		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
+		while (sde_rm_get_hw(rm, &iter_i)) {
+			struct sde_pingpong_cfg *pp_cfg =
+				(struct sde_pingpong_cfg *)
+				(iter_i.blk->catalog);
+
+			if (!(test_bit(SDE_PINGPONG_SLAVE, &pp_cfg->features)))
+				continue;
+			if (RESERVED_BY_OTHER(iter_i.blk, rsvp))
+				continue;
+
+			iter_i.blk->rsvp_nxt = rsvp;
+			rc = 0;
+			break;
 		}
 	}
 
-	return 0;
+	return rc;
 }
 
 static int _sde_rm_reserve_ctls(
@@ -676,8 +717,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(ctls) && i < reqs->num_ctl; i++) {
 		ctls[i]->rsvp_nxt = rsvp;
-		MSM_EVTMSG(rm->dev, ctls[i]->type_name, rsvp->enc_id,
-				ctls[i]->id);
+		SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
 	}
 
 	return 0;
@@ -714,8 +754,7 @@
 			continue;
 
 		iter.blk->rsvp_nxt = rsvp;
-		MSM_EVTMSG(rm->dev, iter.blk->type_name, rsvp->enc_id,
-				iter.blk->id);
+		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
 		break;
 	}
 
@@ -749,8 +788,7 @@
 		}
 
 		iter.blk->rsvp_nxt = rsvp;
-		MSM_EVTMSG(rm->dev, iter.blk->type_name,
-				rsvp->enc_id, iter.blk->id);
+		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
 		break;
 	}
 
@@ -949,9 +987,10 @@
 			mode->hdisplay, rm->lm_max_width);
 	SDE_DEBUG("num_lm %d num_ctl %d topology_name %d\n", reqs->num_lm,
 			reqs->num_ctl, reqs->top_name);
-	MSM_EVT(rm->dev, mode->hdisplay, rm->lm_max_width);
-	MSM_EVT(rm->dev, reqs->num_lm, reqs->top_ctrl);
-	MSM_EVT(rm->dev, reqs->top_name, 0);
+	SDE_DEBUG("num_lm %d topology_name %d\n", reqs->num_lm,
+			reqs->top_name);
+	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->num_lm,
+			reqs->top_ctrl, reqs->top_name, reqs->num_ctl);
 
 	return 0;
 }
@@ -1109,7 +1148,7 @@
 	if (!ret) {
 		SDE_DEBUG("rsrv enc %d topology %d\n", rsvp->enc_id,
 				rsvp->topology);
-		MSM_EVT(rm->dev, rsvp->enc_id, rsvp->topology);
+		SDE_EVT32(rsvp->enc_id, rsvp->topology);
 	}
 
 	return ret;
@@ -1149,9 +1188,9 @@
 	SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
 			conn_state->connector->base.id, enc->base.id,
 			crtc_state->crtc->base.id, test_only);
-	MSM_EVT(rm->dev, enc->base.id, conn_state->connector->base.id);
+	SDE_EVT32(enc->base.id, conn_state->connector->base.id);
 
-	_sde_rm_print_rsvps(rm, "begin_reserve");
+	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
 
 	ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
 			conn_state, &reqs);
@@ -1186,14 +1225,14 @@
 				rsvp_cur->seq, rsvp_cur->enc_id);
 		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
 		rsvp_cur = NULL;
-		_sde_rm_print_rsvps(rm, "post_clear");
+		_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
 	}
 
 	/* Check the proposed reservation, store it in hw's "next" field */
 	ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
 			rsvp_nxt, &reqs);
 
-	_sde_rm_print_rsvps(rm, "new_rsvp_next");
+	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
 
 	if (ret) {
 		SDE_ERROR("failed to reserve hw resources: %d\n", ret);
@@ -1217,7 +1256,7 @@
 		ret = _sde_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
 	}
 
-	_sde_rm_print_rsvps(rm, "final");
+	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index 950145c..862b5c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -92,6 +92,69 @@
 			__entry->vbif_idx)
 )
 
+
+TRACE_EVENT(sde_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(sde_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(sde_evtlog,
+	TP_PROTO(const char *tag, u32 tag_id, u64 value1, u64 value2),
+	TP_ARGS(tag, tag_id, value1, value2),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(evtlog_tag, tag)
+			__field(u32, tag_id)
+			__field(u64, value1)
+			__field(u64, value2)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(evtlog_tag, tag);
+			__entry->tag_id = tag_id;
+			__entry->value1 = value1;
+			__entry->value2 = value2;
+	),
+	TP_printk("%d|%s:%d|%llu|%llu", __entry->pid, __get_str(evtlog_tag),
+			__entry->tag_id, __entry->value1, __entry->value2)
+)
+
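+/* atrace-style helpers: emit begin/end markers and counter values for systrace */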
+#define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
+#define SDE_ATRACE_BEGIN(name) trace_sde_mark_write(current->tgid, name, 1)
+#define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
+
+#define SDE_ATRACE_INT(name, value) \
+	trace_sde_trace_counter(current->tgid, name, value)
+
 #endif /* _SDE_TRACE_H_ */
 
 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index 6060bde..fb6d9da 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -12,6 +12,8 @@
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 
+#include <linux/debugfs.h>
+
 #include "sde_vbif.h"
 #include "sde_hw_vbif.h"
 #include "sde_trace.h"
@@ -198,7 +200,7 @@
 
 	ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
 	if (ret)
-		MSM_EVT(sde_kms->dev, vbif->idx, params->xin_id);
+		SDE_EVT32(vbif->idx, params->xin_id);
 
 	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
 
@@ -207,3 +209,76 @@
 exit:
 	return;
 }
+
+#ifdef CONFIG_DEBUG_FS
+void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove_recursive(sde_kms->debugfs_vbif);
+	sde_kms->debugfs_vbif = NULL;
+}
+
+int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root)
+{
+	char vbif_name[32];
+	struct dentry *debugfs_vbif;
+	int i, j;
+
+	sde_kms->debugfs_vbif = debugfs_create_dir("vbif",
+			sde_kms->debugfs_root);
+	if (!sde_kms->debugfs_vbif) {
+		SDE_ERROR("failed to create vbif debugfs\n");
+		return -EINVAL;
+	}
+
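+	/* one debugfs sub-directory per VBIF instance, named by hardware id */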
+	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+		struct sde_vbif_cfg *vbif = &sde_kms->catalog->vbif[i];
+
+		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+		debugfs_vbif = debugfs_create_dir(vbif_name,
+				sde_kms->debugfs_vbif);
+
+		debugfs_create_u32("features", 0644, debugfs_vbif,
+			(u32 *)&vbif->features);
+
+		debugfs_create_u32("xin_halt_timeout", 0444, debugfs_vbif,
+			(u32 *)&vbif->xin_halt_timeout);
+
+		debugfs_create_u32("default_rd_ot_limit", 0444, debugfs_vbif,
+			(u32 *)&vbif->default_ot_rd_limit);
+
+		debugfs_create_u32("default_wr_ot_limit", 0444, debugfs_vbif,
+			(u32 *)&vbif->default_ot_wr_limit);
+
+		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			struct sde_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_rd_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_pps", j);
+			debugfs_create_u64(vbif_name, 0444, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, 0444, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+
+		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			struct sde_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_wr_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_pps", j);
+			debugfs_create_u64(vbif_name, 0444, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, 0444, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+	}
+
+	return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index 703befa..4b1cb1c 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -35,4 +35,17 @@
 void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
 		struct sde_vbif_set_ot_params *params);
 
+#ifdef CONFIG_DEBUG_FS
+int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
+void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
+#else
+static inline int sde_debugfs_vbif_init(struct sde_kms *sde_kms,
+		struct dentry *debugfs_root)
+{
+	return 0;
+}
+static inline void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
 #endif /* __SDE_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 1778721..647cb58 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -487,7 +487,11 @@
 	return rc;
 }
 
-int sde_wb_dev_init(struct sde_wb_device *wb_dev)
+/**
+ * _sde_wb_dev_init - perform device initialization
+ * @wb_dev:	Pointer to writeback device
+ */
+static int _sde_wb_dev_init(struct sde_wb_device *wb_dev)
 {
 	int rc = 0;
 
@@ -501,7 +505,11 @@
 	return rc;
 }
 
-int sde_wb_dev_deinit(struct sde_wb_device *wb_dev)
+/**
+ * _sde_wb_dev_deinit - perform device de-initialization
+ * @wb_dev:	Pointer to writeback device
+ */
+static int _sde_wb_dev_deinit(struct sde_wb_device *wb_dev)
 {
 	int rc = 0;
 
@@ -515,31 +523,57 @@
 	return rc;
 }
 
-int sde_wb_bind(struct sde_wb_device *wb_dev, struct drm_device *drm_dev)
+/**
+ * sde_wb_bind - bind writeback device with controlling device
+ * @dev:        Pointer to base of platform device
+ * @master:     Pointer to container of drm device
+ * @data:       Pointer to private data
+ * Returns:     Zero on success
+ */
+static int sde_wb_bind(struct device *dev, struct device *master, void *data)
 {
-	int rc = 0;
+	struct sde_wb_device *wb_dev;
 
-	if (!wb_dev || !drm_dev) {
+	if (!dev || !master) {
 		SDE_ERROR("invalid params\n");
 		return -EINVAL;
 	}
 
+	wb_dev = platform_get_drvdata(to_platform_device(dev));
+	if (!wb_dev) {
+		SDE_ERROR("invalid wb device\n");
+		return -EINVAL;
+	}
+
 	SDE_DEBUG("\n");
 
 	mutex_lock(&wb_dev->wb_lock);
-	wb_dev->drm_dev = drm_dev;
+	wb_dev->drm_dev = dev_get_drvdata(master);
 	mutex_unlock(&wb_dev->wb_lock);
 
-	return rc;
+	return 0;
 }
 
-int sde_wb_unbind(struct sde_wb_device *wb_dev)
+/**
+ * sde_wb_unbind - unbind writeback from controlling device
+ * @dev:        Pointer to base of platform device
+ * @master:     Pointer to container of drm device
+ * @data:       Pointer to private data
+ */
+static void sde_wb_unbind(struct device *dev,
+		struct device *master, void *data)
 {
-	int rc = 0;
+	struct sde_wb_device *wb_dev;
 
-	if (!wb_dev) {
+	if (!dev) {
 		SDE_ERROR("invalid params\n");
-		return -EINVAL;
+		return;
+	}
+
+	wb_dev = platform_get_drvdata(to_platform_device(dev));
+	if (!wb_dev) {
+		SDE_ERROR("invalid wb device\n");
+		return;
 	}
 
 	SDE_DEBUG("\n");
@@ -547,15 +581,23 @@
 	mutex_lock(&wb_dev->wb_lock);
 	wb_dev->drm_dev = NULL;
 	mutex_unlock(&wb_dev->wb_lock);
-
-	return rc;
 }
 
+static const struct component_ops sde_wb_comp_ops = {
+	.bind = sde_wb_bind,
+	.unbind = sde_wb_unbind,
+};
+
+/**
+ * sde_wb_drm_init - perform DRM initialization
+ * @wb_dev:	Pointer to writeback device
+ * @encoder:	Pointer to associated encoder
+ */
 int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
 {
 	int rc = 0;
 
-	if (!wb_dev || !encoder) {
+	if (!wb_dev || !wb_dev->drm_dev || !encoder) {
 		SDE_ERROR("invalid params\n");
 		return -EINVAL;
 	}
@@ -631,7 +673,13 @@
 	list_add(&wb_dev->wb_list, &sde_wb_list);
 	mutex_unlock(&sde_wb_list_lock);
 
-	return 0;
+	if (!_sde_wb_dev_init(wb_dev)) {
+		ret = component_add(&pdev->dev, &sde_wb_comp_ops);
+		if (ret)
+			pr_err("component add failed\n");
+	}
+
+	return ret;
 }
 
 /**
@@ -649,6 +697,8 @@
 
 	SDE_DEBUG("\n");
 
+	(void)_sde_wb_dev_deinit(wb_dev);
+
 	mutex_lock(&sde_wb_list_lock);
 	list_for_each_entry_safe(curr, next, &sde_wb_list, wb_list) {
 		if (curr == wb_dev) {
@@ -681,12 +731,15 @@
 	},
 };
 
-void sde_wb_register(void)
+static int __init sde_wb_register(void)
 {
-	platform_driver_register(&sde_wb_driver);
+	return platform_driver_register(&sde_wb_driver);
 }
 
-void sde_wb_unregister(void)
+static void __exit sde_wb_unregister(void)
 {
 	platform_driver_unregister(&sde_wb_driver);
 }
+
+module_init(sde_wb_register);
+module_exit(sde_wb_unregister);
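The conversion above moves sde_wb onto the kernel component framework: the writeback device registers itself with component_add(), and the DRM master aggregates it once all components are present. A rough sketch of the master-side matching, assuming matching by device node; the helper names here are hypothetical and not part of this patch.

#include <linux/component.h>
#include <linux/of.h>

static int example_compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static void example_add_wb_match(struct device *master_dev,
		struct component_match **match,
		struct device_node *wb_node)
{
	/*
	 * Once the master calls component_master_add_with_match(), the
	 * framework invokes sde_wb_bind()/sde_wb_unbind() for this match.
	 */
	component_match_add(master_dev, match, example_compare_of, wb_node);
}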
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
index 7905620..4e33595 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -17,6 +17,7 @@
 
 #include "msm_kms.h"
 #include "sde_kms.h"
+#include "sde_connector.h"
 
 /**
  * struct sde_wb_device - Writeback device context
@@ -93,35 +94,8 @@
  */
 int wb_display_get_displays(void **display_array, u32 max_display_count);
 
-/**
- * wb_display_get_displays - returns pointers for supported display devices
- * @display_array: Pointer to display array to be filled
- * @max_display_count: Size of display_array
- * @Returns: Number of display entries filled
- */
-int sde_wb_dev_init(struct sde_wb_device *wb_dev);
-
-/**
- * sde_wb_dev_deinit - perform device de-initialization
- * @wb_dev:	Pointer to writeback device
- * Returns:	0 if success; error code otherwise
- */
-int sde_wb_dev_deinit(struct sde_wb_device *wb_dev);
-
-/**
- * sde_wb_bind - bind writeback device with controlling device
- * @wb_dev:	Pointer to writeback device
- * @drm_dev:	Pointer to controlling DRM device
- * Returns:	0 if success; error code otherwise
- */
-int sde_wb_bind(struct sde_wb_device *wb_dev, struct drm_device *drm_dev);
-
-/**
- * sde_wb_unbind - unbind writeback from controlling device
- * @wb_dev:	Pointer to writeback device
- * Returns:	0 if success; error code otherwise
- */
-int sde_wb_unbind(struct sde_wb_device *wb_dev);
+void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active);
+bool sde_wb_is_active(struct sde_wb_device *wb_dev);
 
 /**
  * sde_wb_drm_init - perform DRM initialization
@@ -139,16 +113,6 @@
 int sde_wb_drm_deinit(struct sde_wb_device *wb_dev);
 
 /**
- * sde_wb_register - register writeback module
- */
-void sde_wb_register(void);
-
-/**
- * sde_wb_unregister - unregister writeback module
- */
-void sde_wb_unregister(void);
-
-/**
  * sde_wb_config - setup connection status and available drm modes of the
  *			given writeback connector
  * @drm_dev:	Pointer to DRM device
@@ -274,24 +238,13 @@
 	return 0;
 }
 static inline
-int sde_wb_dev_init(struct sde_wb_device *wb_dev)
+void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active)
 {
-	return 0;
 }
 static inline
-int sde_wb_dev_deinit(struct sde_wb_device *wb_dev)
+bool sde_wb_is_active(struct sde_wb_device *wb_dev)
 {
-	return 0;
-}
-static inline
-int sde_wb_bind(struct sde_wb_device *wb_dev, struct drm_device *drm_dev)
-{
-	return 0;
-}
-static inline
-int sde_wb_unbind(struct sde_wb_device *wb_dev)
-{
-	return 0;
+	return false;
 }
 static inline
 int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
@@ -304,14 +257,6 @@
 	return 0;
 }
 static inline
-void sde_wb_register(void)
-{
-}
-static inline
-void sde_wb_unregister(void)
-{
-}
-static inline
 int sde_wb_config(struct drm_device *drm_dev, void *data,
 				struct drm_file *file_priv)
 {
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
new file mode 100644
index 0000000..271c41f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SDE_DBG_H_
+#define SDE_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+#define SDE_EVTLOG_DATA_LIMITER	(-1)
+#define SDE_EVTLOG_FUNC_ENTRY	0x1111
+#define SDE_EVTLOG_FUNC_EXIT	0x2222
+
+#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
+
+enum sde_dbg_evtlog_flag {
+	SDE_EVTLOG_DEFAULT = BIT(0),
+	SDE_EVTLOG_IRQ = BIT(1),
+	SDE_EVTLOG_ALL = BIT(7)
+};
+
+/**
+ * SDE_EVT32 - Write a list of 32-bit values as an event into the event log
+ * ... - variable arguments
+ */
+#define SDE_EVT32(...) sde_evtlog(__func__, __LINE__, SDE_EVTLOG_DEFAULT, \
+		##__VA_ARGS__, SDE_EVTLOG_DATA_LIMITER)
+#define SDE_EVT32_IRQ(...) sde_evtlog(__func__, __LINE__, SDE_EVTLOG_IRQ, \
+		##__VA_ARGS__, SDE_EVTLOG_DATA_LIMITER)
+
+#define SDE_DBG_DUMP(...)	\
+	sde_dbg_dump(false, __func__, ##__VA_ARGS__, \
+		SDE_DBG_DUMP_DATA_LIMITER)
+
+#define SDE_DBG_DUMP_WQ(...)	\
+	sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
+		SDE_DBG_DUMP_DATA_LIMITER)
+
+#if defined(CONFIG_DEBUG_FS)
+
+int sde_evtlog_init(struct dentry *debugfs_root);
+void sde_evtlog_destroy(void);
+void sde_evtlog(const char *name, int line, int flag, ...);
+void sde_dbg_dump(bool queue, const char *name, ...);
+#else
+static inline int sde_evtlog_init(struct dentry *debugfs_root) { return 0; }
+static inline void sde_evtlog(const char *name, int line, int flag, ...) {}
+static inline void sde_evtlog_destroy(void) { }
+static inline void sde_dbg_dump(bool queue, const char *name, ...) {}
+#endif
+
+#endif /* SDE_DBG_H_ */
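A hedged usage sketch for the macros above; the function names and values are illustrative. Each call records the caller, line number, and the supplied 32-bit values, with the terminating SDE_EVTLOG_DATA_LIMITER appended by the macro itself.

static void example_commit_point(u32 crtc_id, u32 frame_count)
{
	/* recorded with the SDE_EVTLOG_DEFAULT flag from process context */
	SDE_EVT32(crtc_id, frame_count);
}

static void example_vblank_irq(u32 intr_status)
{
	/* IRQ-context variant, gated by the SDE_EVTLOG_IRQ enable bit */
	SDE_EVT32_IRQ(SDE_EVTLOG_FUNC_ENTRY, intr_status);
}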
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
new file mode 100644
index 0000000..7283277
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -0,0 +1,326 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"sde_evtlog:[%s] " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+
+#include "sde_dbg.h"
+#include "sde_trace.h"
+
+#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG
+#define SDE_EVTLOG_DEFAULT_ENABLE 1
+#else
+#define SDE_EVTLOG_DEFAULT_ENABLE 0
+#endif
+
+#define SDE_DBG_DEFAULT_PANIC		1
+
+/*
+ * evtlog prints at most this number of entries when dumped through the
+ * debugfs node or on panic. This prevents the kernel log from being
+ * flooded with evtlog messages.
+ */
+#define SDE_EVTLOG_PRINT_ENTRY	256
+
+/*
+ * evtlog keeps this number of entries in memory for debug purposes. It
+ * must be greater than the print entry count to prevent out-of-bounds
+ * access to the evtlog entry array.
+ */
+#define SDE_EVTLOG_ENTRY	(SDE_EVTLOG_PRINT_ENTRY * 4)
+#define SDE_EVTLOG_MAX_DATA 15
+#define SDE_EVTLOG_BUF_MAX 512
+#define SDE_EVTLOG_BUF_ALIGN 32
+
+DEFINE_SPINLOCK(sde_evtloglock);
+
+struct tlog {
+	u32 counter;
+	s64 time;
+	const char *name;
+	int line;
+	u32 data[SDE_EVTLOG_MAX_DATA];
+	u32 data_cnt;
+	int pid;
+};
+
+static struct sde_dbg_evtlog {
+	struct tlog logs[SDE_EVTLOG_ENTRY];
+	u32 first;
+	u32 last;
+	u32 curr;
+	struct dentry *evtlog;
+	u32 evtlog_enable;
+	u32 panic_on_err;
+	struct work_struct evtlog_dump_work;
+	bool work_panic;
+} sde_dbg_evtlog;
+
+static inline bool sde_evtlog_is_enabled(u32 flag)
+{
+	return (flag & sde_dbg_evtlog.evtlog_enable) ||
+		(flag == SDE_EVTLOG_ALL && sde_dbg_evtlog.evtlog_enable);
+}
+
+void sde_evtlog(const char *name, int line, int flag, ...)
+{
+	unsigned long flags;
+	int i, val = 0;
+	va_list args;
+	struct tlog *log;
+
+	if (!sde_evtlog_is_enabled(flag))
+		return;
+
+	spin_lock_irqsave(&sde_evtloglock, flags);
+	log = &sde_dbg_evtlog.logs[sde_dbg_evtlog.curr];
+	log->time = ktime_to_us(ktime_get());
+	log->name = name;
+	log->line = line;
+	log->data_cnt = 0;
+	log->pid = current->pid;
+
+	va_start(args, flag);
+	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
+
+		val = va_arg(args, int);
+		if (val == SDE_EVTLOG_DATA_LIMITER)
+			break;
+
+		log->data[i] = val;
+	}
+	va_end(args);
+	log->data_cnt = i;
+	sde_dbg_evtlog.curr = (sde_dbg_evtlog.curr + 1) % SDE_EVTLOG_ENTRY;
+	sde_dbg_evtlog.last++;
+
+	trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0,
+			i > 1 ? log->data[1] : 0);
+
+	spin_unlock_irqrestore(&sde_evtloglock, flags);
+}
+
+/* always dump the last entries which are not dumped yet */
+static bool _sde_evtlog_dump_calc_range(void)
+{
+	static u32 next;
+	bool need_dump = true;
+	unsigned long flags;
+	struct sde_dbg_evtlog *evtlog = &sde_dbg_evtlog;
+
+	spin_lock_irqsave(&sde_evtloglock, flags);
+
+	evtlog->first = next;
+
+	if (evtlog->last == evtlog->first) {
+		need_dump = false;
+		goto dump_exit;
+	}
+
+	if (evtlog->last < evtlog->first) {
+		evtlog->first %= SDE_EVTLOG_ENTRY;
+		if (evtlog->last < evtlog->first)
+			evtlog->last += SDE_EVTLOG_ENTRY;
+	}
+
+	if ((evtlog->last - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
+		pr_warn("evtlog buffer overflow before dump: %d\n",
+			evtlog->last - evtlog->first);
+		evtlog->first = evtlog->last - SDE_EVTLOG_PRINT_ENTRY;
+	}
+	next = evtlog->first + 1;
+
+dump_exit:
+	spin_unlock_irqrestore(&sde_evtloglock, flags);
+
+	return need_dump;
+}
+
+static ssize_t sde_evtlog_dump_entry(char *evtlog_buf, ssize_t evtlog_buf_size)
+{
+	int i;
+	ssize_t off = 0;
+	struct tlog *log, *prev_log;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sde_evtloglock, flags);
+
+	log = &sde_dbg_evtlog.logs[sde_dbg_evtlog.first %
+		SDE_EVTLOG_ENTRY];
+
+	prev_log = &sde_dbg_evtlog.logs[(sde_dbg_evtlog.first - 1) %
+		SDE_EVTLOG_ENTRY];
+
+	off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
+		log->name, log->line);
+
+	if (off < SDE_EVTLOG_BUF_ALIGN) {
+		memset((evtlog_buf + off), 0x20, (SDE_EVTLOG_BUF_ALIGN - off));
+		off = SDE_EVTLOG_BUF_ALIGN;
+	}
+
+	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
+		"=>[%-8d:%-11llu:%9llu][%-4d]:", sde_dbg_evtlog.first,
+		log->time, (log->time - prev_log->time), log->pid);
+
+	for (i = 0; i < log->data_cnt; i++)
+		off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
+			"%x ", log->data[i]);
+
+	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
+
+	spin_unlock_irqrestore(&sde_evtloglock, flags);
+
+	return off;
+}
+
+static void _sde_evtlog_dump_all(void)
+{
+	char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+
+	while (_sde_evtlog_dump_calc_range()) {
+		sde_evtlog_dump_entry(evtlog_buf, SDE_EVTLOG_BUF_MAX);
+		pr_info("%s", evtlog_buf);
+	}
+}
+
+static void _sde_dump_array(bool dead, const char *name)
+{
+	_sde_evtlog_dump_all();
+
+	if (dead && sde_dbg_evtlog.panic_on_err)
+		panic(name);
+}
+
+static void _sde_dump_work(struct work_struct *work)
+{
+	_sde_dump_array(sde_dbg_evtlog.work_panic, "evtlog_workitem");
+}
+
+void sde_dbg_dump(bool queue, const char *name, ...)
+{
+	int i;
+	bool dead = false;
+	va_list args;
+	char *blk_name = NULL;
+
+	if (!sde_evtlog_is_enabled(SDE_EVTLOG_DEFAULT))
+		return;
+
+	if (queue && work_pending(&sde_dbg_evtlog.evtlog_dump_work))
+		return;
+
+	va_start(args, name);
+	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
+		blk_name = va_arg(args, char*);
+		if (IS_ERR_OR_NULL(blk_name))
+			break;
+
+		if (!strcmp(blk_name, "panic"))
+			dead = true;
+	}
+	va_end(args);
+
+	if (queue) {
+		/* schedule work to dump later */
+		sde_dbg_evtlog.work_panic = dead;
+		schedule_work(&sde_dbg_evtlog.evtlog_dump_work);
+	} else {
+		_sde_dump_array(dead, name);
+	}
+}
+
+static int sde_evtlog_dump_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+
+	if (_sde_evtlog_dump_calc_range()) {
+		len = sde_evtlog_dump_entry(evtlog_buf, SDE_EVTLOG_BUF_MAX);
+		if (copy_to_user(buff, evtlog_buf, len))
+			return -EFAULT;
+		*ppos += len;
+	}
+
+	return len;
+}
+
+static ssize_t sde_evtlog_dump_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	_sde_evtlog_dump_all();
+
+	if (sde_dbg_evtlog.panic_on_err)
+		panic("sde");
+
+	return count;
+}
+
+static const struct file_operations sde_evtlog_fops = {
+	.open = sde_evtlog_dump_open,
+	.read = sde_evtlog_dump_read,
+	.write = sde_evtlog_dump_write,
+};
+
+int sde_evtlog_init(struct dentry *debugfs_root)
+{
+	int i;
+
+	sde_dbg_evtlog.evtlog = debugfs_create_dir("evt_dbg", debugfs_root);
+	if (IS_ERR_OR_NULL(sde_dbg_evtlog.evtlog)) {
+		pr_err("debugfs_create_dir fail, error %ld\n",
+		       PTR_ERR(sde_dbg_evtlog.evtlog));
+		sde_dbg_evtlog.evtlog = NULL;
+		return -ENODEV;
+	}
+
+	INIT_WORK(&sde_dbg_evtlog.evtlog_dump_work, _sde_dump_work);
+	sde_dbg_evtlog.work_panic = false;
+
+	for (i = 0; i < SDE_EVTLOG_ENTRY; i++)
+		sde_dbg_evtlog.logs[i].counter = i;
+
+	debugfs_create_file("dump", 0644, sde_dbg_evtlog.evtlog, NULL,
+						&sde_evtlog_fops);
+	debugfs_create_u32("enable", 0644, sde_dbg_evtlog.evtlog,
+			    &sde_dbg_evtlog.evtlog_enable);
+	debugfs_create_u32("panic", 0644, sde_dbg_evtlog.evtlog,
+			    &sde_dbg_evtlog.panic_on_err);
+
+	sde_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
+	sde_dbg_evtlog.panic_on_err = SDE_DBG_DEFAULT_PANIC;
+
+	pr_info("evtlog_status: enable:%d, panic:%d\n",
+		sde_dbg_evtlog.evtlog_enable, sde_dbg_evtlog.panic_on_err);
+
+	return 0;
+}
+
+void sde_evtlog_destroy(void)
+{
+	debugfs_remove(sde_dbg_evtlog.evtlog);
+}
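Assuming the driver already owns a debugfs root dentry, wiring the event log in is a single call; the wrapper names below are illustrative and only sde_evtlog_init()/sde_evtlog_destroy() come from this file.

static int example_dbg_init(struct dentry *debugfs_root)
{
	/* creates <debugfs_root>/evt_dbg/ with dump, enable and panic nodes */
	return sde_evtlog_init(debugfs_root);
}

static void example_dbg_teardown(void)
{
	sde_evtlog_destroy();
}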
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
new file mode 100644
index 0000000..70a4225
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_io_util.c
@@ -0,0 +1,502 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/sde_io_util.h>
+
+#define MAX_I2C_CMDS  16
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+	u32 in_val;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (offset > io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	writel_relaxed(value, io->base + offset);
+	if (debug) {
+		in_val = readl_relaxed(io->base + offset);
+		DEV_DBG("[%08x] => %08x [%08x]\n",
+			(u32)(unsigned long)(io->base + offset),
+			value, in_val);
+	}
+} /* dss_reg_w */
+EXPORT_SYMBOL(dss_reg_w);
+
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+	u32 value;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	if (offset > io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	value = readl_relaxed(io->base + offset);
+	if (debug)
+		DEV_DBG("[%08x] <= %08x\n",
+			(u32)(unsigned long)(io->base + offset), value);
+
+	return value;
+} /* dss_reg_r */
+EXPORT_SYMBOL(dss_reg_r);
+
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+	u32 debug)
+{
+	if (debug)
+		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+			(void *)base, length, false);
+} /* dss_reg_dump */
+EXPORT_SYMBOL(dss_reg_dump);
+
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+	unsigned int type, const char *name)
+{
+	struct resource *res = NULL;
+
+	res = platform_get_resource_byname(pdev, type, name);
+	if (!res)
+		DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+	return res;
+} /* msm_dss_get_res_byname */
+EXPORT_SYMBOL(msm_dss_get_res_byname);
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name)
+{
+	struct resource *res = NULL;
+
+	if (!pdev || !io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -ENODEV;
+	}
+
+	io_data->len = (u32)resource_size(res);
+	io_data->base = ioremap(res->start, io_data->len);
+	if (!io_data->base) {
+		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -EIO;
+	}
+
+	return 0;
+} /* msm_dss_ioremap_byname */
+EXPORT_SYMBOL(msm_dss_ioremap_byname);
+
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+	if (!io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (io_data->base) {
+		iounmap(io_data->base);
+		io_data->base = NULL;
+	}
+	io_data->len = 0;
+} /* msm_dss_iounmap */
+EXPORT_SYMBOL(msm_dss_iounmap);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config)
+{
+	int i = 0, rc = 0;
+	struct dss_vreg *curr_vreg = NULL;
+	enum dss_vreg_type type;
+
+	if (!in_vreg || !num_vreg)
+		return rc;
+
+	if (config) {
+		for (i = 0; i < num_vreg; i++) {
+			curr_vreg = &in_vreg[i];
+			curr_vreg->vreg = regulator_get(dev,
+				curr_vreg->vreg_name);
+			rc = PTR_RET(curr_vreg->vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+					 __builtin_return_address(0), __func__,
+					 curr_vreg->vreg_name, rc);
+				curr_vreg->vreg = NULL;
+				goto vreg_get_fail;
+			}
+			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+					? DSS_REG_LDO : DSS_REG_VS;
+			if (type == DSS_REG_LDO) {
+				rc = regulator_set_voltage(
+					curr_vreg->vreg,
+					curr_vreg->min_voltage,
+					curr_vreg->max_voltage);
+				if (rc < 0) {
+					DEV_ERR("%pS->%s: %s set vltg fail\n",
+						__builtin_return_address(0),
+						__func__,
+						curr_vreg->vreg_name);
+					goto vreg_set_voltage_fail;
+				}
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			curr_vreg = &in_vreg[i];
+			if (curr_vreg->vreg) {
+				type = (regulator_count_voltages(
+					curr_vreg->vreg) > 0)
+					? DSS_REG_LDO : DSS_REG_VS;
+				if (type == DSS_REG_LDO) {
+					regulator_set_voltage(curr_vreg->vreg,
+						0, curr_vreg->max_voltage);
+				}
+				regulator_put(curr_vreg->vreg);
+				curr_vreg->vreg = NULL;
+			}
+		}
+	}
+	return 0;
+
+vreg_unconfig:
+	if (type == DSS_REG_LDO)
+		regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+	regulator_put(curr_vreg->vreg);
+	curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+	for (i--; i >= 0; i--) {
+		curr_vreg = &in_vreg[i];
+		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+			? DSS_REG_LDO : DSS_REG_VS;
+		goto vreg_unconfig;
+	}
+	return rc;
+} /* msm_dss_config_vreg */
+EXPORT_SYMBOL(msm_dss_config_vreg);
+
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
+{
+	int i = 0, rc = 0;
+	bool need_sleep;
+
+	if (enable) {
+		for (i = 0; i < num_vreg; i++) {
+			rc = PTR_RET(in_vreg[i].vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name, rc);
+				goto vreg_set_opt_mode_fail;
+			}
+			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+			if (in_vreg[i].pre_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].pre_on_sleep * 1000,
+					in_vreg[i].pre_on_sleep * 1000);
+			rc = regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].enable_load);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s set opt m fail\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto vreg_set_opt_mode_fail;
+			}
+			rc = regulator_enable(in_vreg[i].vreg);
+			if (in_vreg[i].post_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].post_on_sleep * 1000,
+					in_vreg[i].post_on_sleep * 1000);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto disable_vreg;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			if (in_vreg[i].pre_off_sleep)
+				usleep_range(in_vreg[i].pre_off_sleep * 1000,
+					in_vreg[i].pre_off_sleep * 1000);
+			regulator_set_load(in_vreg[i].vreg,
+				in_vreg[i].disable_load);
+			regulator_disable(in_vreg[i].vreg);
+			if (in_vreg[i].post_off_sleep)
+				usleep_range(in_vreg[i].post_off_sleep * 1000,
+					in_vreg[i].post_off_sleep * 1000);
+		}
+	}
+	return rc;
+
+disable_vreg:
+	regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+	for (i--; i >= 0; i--) {
+		if (in_vreg[i].pre_off_sleep)
+			usleep_range(in_vreg[i].pre_off_sleep * 1000,
+				in_vreg[i].pre_off_sleep * 1000);
+		regulator_set_load(in_vreg[i].vreg,
+			in_vreg[i].disable_load);
+		regulator_disable(in_vreg[i].vreg);
+		if (in_vreg[i].post_off_sleep)
+			usleep_range(in_vreg[i].post_off_sleep * 1000,
+				in_vreg[i].post_off_sleep * 1000);
+	}
+
+	return rc;
+} /* msm_dss_enable_vreg */
+EXPORT_SYMBOL(msm_dss_enable_vreg);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
+{
+	int i = 0, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_gpio; i++) {
+			DEV_DBG("%pS->%s: %s enable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+
+			rc = gpio_request(in_gpio[i].gpio,
+				in_gpio[i].gpio_name);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_gpio[i].gpio_name);
+				goto disable_gpio;
+			}
+			gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
+		}
+	} else {
+		for (i = num_gpio-1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: %s disable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+			if (in_gpio[i].gpio)
+				gpio_free(in_gpio[i].gpio);
+		}
+	}
+	return rc;
+
+disable_gpio:
+	for (i--; i >= 0; i--)
+		if (in_gpio[i].gpio)
+			gpio_free(in_gpio[i].gpio);
+
+	return rc;
+} /* msm_dss_enable_gpio */
+EXPORT_SYMBOL(msm_dss_enable_gpio);
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+} /* msm_dss_put_clk */
+EXPORT_SYMBOL(msm_dss_put_clk);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_RET(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	msm_dss_put_clk(clk_arry, num_clk);
+
+	return rc;
+} /* msm_dss_get_clk */
+EXPORT_SYMBOL(msm_dss_get_clk);
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		if (clk_arry[i].clk) {
+			if (clk_arry[i].type != DSS_CLK_AHB) {
+				DEV_DBG("%pS->%s: '%s' rate %ld\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name,
+					clk_arry[i].rate);
+				rc = clk_set_rate(clk_arry[i].clk,
+					clk_arry[i].rate);
+				if (rc) {
+					DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+					break;
+				}
+			}
+		} else {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	return rc;
+} /* msm_dss_clk_set_rate */
+EXPORT_SYMBOL(msm_dss_clk_set_rate);
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				msm_dss_enable_clk(&clk_arry[i],
+					i, false);
+				break;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+} /* msm_dss_enable_clk */
+EXPORT_SYMBOL(msm_dss_enable_clk);
+
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *read_buf)
+{
+	struct i2c_msg msgs[2];
+	int ret = -1;
+
+	pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].buf = &reg_offset;
+	msgs[0].len = 1;
+
+	msgs[1].addr = slave_addr >> 1;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = read_buf;
+	msgs[1].len = 1;
+
+	ret = i2c_transfer(client->adapter, msgs, 2);
+	if (ret < 1) {
+		pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+		return -EACCES;
+	}
+	pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+	return 0;
+}
+EXPORT_SYMBOL(sde_i2c_byte_read);
+
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value)
+{
+	struct i2c_msg msgs[1];
+	uint8_t data[2];
+	int status = -EACCES;
+
+	pr_debug("%s: writing to slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	data[0] = reg_offset;
+	data[1] = *value;
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].len = 2;
+	msgs[0].buf = data;
+
+	status = i2c_transfer(client->adapter, msgs, 1);
+	if (status < 1) {
+		pr_err("I2C WRITE FAILED=[%d]\n", status);
+		return -EACCES;
+	}
+	pr_debug("%s: I2C write status=%x\n", __func__, status);
+	return status;
+}
+EXPORT_SYMBOL(sde_i2c_byte_write);
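A hedged sketch of the typical bring-up order for the helpers above in a display sub-driver probe path; the function name and the vreg/clock arrays are assumptions, only the msm_dss_* calls come from this file.

static int example_power_on(struct device *dev, struct dss_vreg *vregs,
		int num_vreg, struct dss_clk *clks, int num_clk)
{
	int rc;

	rc = msm_dss_config_vreg(dev, vregs, num_vreg, 1); /* get + set voltage */
	if (rc)
		return rc;

	rc = msm_dss_enable_vreg(vregs, num_vreg, 1);
	if (rc)
		goto unconfig_vreg;

	rc = msm_dss_get_clk(dev, clks, num_clk);
	if (rc)
		goto disable_vreg;

	rc = msm_dss_clk_set_rate(clks, num_clk);
	if (!rc)
		rc = msm_dss_enable_clk(clks, num_clk, 1);
	if (rc)
		goto put_clk;

	return 0;

put_clk:
	msm_dss_put_clk(clks, num_clk);
disable_vreg:
	msm_dss_enable_vreg(vregs, num_vreg, 0);
unconfig_vreg:
	msm_dss_config_vreg(dev, vregs, num_vreg, 0);
	return rc;
}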
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index e2e8e60..f2b64ca 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -24,7 +24,7 @@
 
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
-#include <linux/mdss_io_util.h>
+#include <linux/sde_io_util.h>
 
 #include "sde_power_handle.h"
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msmskunk.c b/drivers/pinctrl/qcom/pinctrl-msmskunk.c
index 9d3ec83..e203b2d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msmskunk.c
+++ b/drivers/pinctrl/qcom/pinctrl-msmskunk.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,10 +18,10 @@
 
 #include "pinctrl-msm.h"
 
-#define FUNCTION(fname)			                \
-	[msm_mux_##fname] = {		                \
+#define FUNCTION(fname)					\
+	[msm_mux_##fname] = {				\
 		.name = #fname,				\
-		.groups = fname##_groups,               \
+		.groups = fname##_groups,		\
 		.ngroups = ARRAY_SIZE(fname##_groups),	\
 	}
 
@@ -29,7 +29,7 @@
 #define SOUTH	0x00900000
 #define REG_SIZE 0x1000
 #define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
-	{					        \
+	{						\
 		.name = "gpio" #id,			\
 		.pins = gpio##id##_pins,		\
 		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
@@ -44,11 +44,11 @@
 			msm_mux_##f7,			\
 			msm_mux_##f8,			\
 			msm_mux_##f9			\
-		},				        \
+		},					\
 		.nfuncs = 10,				\
 		.ctl_reg = base + REG_SIZE * id,		\
 		.io_reg = base + 0x4 + REG_SIZE * id,		\
-		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,	\
 		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
 		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
 		.mux_bit = 2,			\
@@ -68,7 +68,7 @@
 	}
 
 #define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
-	{					        \
+	{						\
 		.name = #pg_name,			\
 		.pins = pg_name##_pins,			\
 		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
@@ -91,6 +91,31 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
+
+#define UFS_RESET(pg_name, offset)				\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
 static const struct pinctrl_pin_desc msmskunk_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
@@ -499,7 +524,6 @@
 	msm_mux_reserved30,
 	msm_mux_qup11,
 	msm_mux_qup14,
-	msm_mux_reserved31,
 	msm_mux_phase_flag3,
 	msm_mux_reserved96,
 	msm_mux_ldo_en,
@@ -597,6 +621,7 @@
 	msm_mux_reserved147,
 	msm_mux_reserved148,
 	msm_mux_reserved149,
+	msm_mux_reserved31,
 	msm_mux_reserved32,
 	msm_mux_reserved33,
 	msm_mux_reserved34,
@@ -700,15 +725,6 @@
 	msm_mux_reserved79,
 	msm_mux_reserved80,
 	msm_mux_qup15,
-	msm_mux_reserved81,
-	msm_mux_reserved82,
-	msm_mux_reserved83,
-	msm_mux_reserved84,
-	msm_mux_pcie1_pwrfault,
-	msm_mux_qup5,
-	msm_mux_reserved85,
-	msm_mux_pcie1_mrl,
-	msm_mux_reserved86,
 	msm_mux_reserved87,
 	msm_mux_reserved88,
 	msm_mux_tsif1_clk,
@@ -733,6 +749,15 @@
 	msm_mux_vfr_1,
 	msm_mux_tgu_ch2,
 	msm_mux_reserved92,
+	msm_mux_reserved81,
+	msm_mux_reserved82,
+	msm_mux_reserved83,
+	msm_mux_reserved84,
+	msm_mux_pcie1_pwrfault,
+	msm_mux_qup5,
+	msm_mux_reserved85,
+	msm_mux_pcie1_mrl,
+	msm_mux_reserved86,
 	msm_mux_tsif2_clk,
 	msm_mux_sdc4_clk,
 	msm_mux_qup7,
@@ -1048,9 +1073,6 @@
 static const char * const qup14_groups[] = {
 	"gpio31", "gpio32", "gpio33", "gpio34",
 };
-static const char * const reserved31_groups[] = {
-	"gpio31",
-};
 static const char * const phase_flag3_groups[] = {
 	"gpio96",
 };
@@ -1343,6 +1365,9 @@
 static const char * const reserved149_groups[] = {
 	"gpio149", "gpio149",
 };
+static const char * const reserved31_groups[] = {
+	"gpio31",
+};
 static const char * const reserved32_groups[] = {
 	"gpio32",
 };
@@ -1654,33 +1679,6 @@
 static const char * const qup15_groups[] = {
 	"gpio81", "gpio82", "gpio83", "gpio84",
 };
-static const char * const reserved81_groups[] = {
-	"gpio81",
-};
-static const char * const reserved82_groups[] = {
-	"gpio82",
-};
-static const char * const reserved83_groups[] = {
-	"gpio83",
-};
-static const char * const reserved84_groups[] = {
-	"gpio84",
-};
-static const char * const pcie1_pwrfault_groups[] = {
-	"gpio85",
-};
-static const char * const qup5_groups[] = {
-	"gpio85", "gpio86", "gpio87", "gpio88",
-};
-static const char * const reserved85_groups[] = {
-	"gpio85",
-};
-static const char * const pcie1_mrl_groups[] = {
-	"gpio86",
-};
-static const char * const reserved86_groups[] = {
-	"gpio86",
-};
 static const char * const reserved87_groups[] = {
 	"gpio87",
 };
@@ -1753,6 +1751,33 @@
 static const char * const reserved92_groups[] = {
 	"gpio92",
 };
+static const char * const reserved81_groups[] = {
+	"gpio81",
+};
+static const char * const reserved82_groups[] = {
+	"gpio82",
+};
+static const char * const reserved83_groups[] = {
+	"gpio83",
+};
+static const char * const reserved84_groups[] = {
+	"gpio84",
+};
+static const char * const pcie1_pwrfault_groups[] = {
+	"gpio85",
+};
+static const char * const qup5_groups[] = {
+	"gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const reserved85_groups[] = {
+	"gpio85",
+};
+static const char * const pcie1_mrl_groups[] = {
+	"gpio86",
+};
+static const char * const reserved86_groups[] = {
+	"gpio86",
+};
 static const char * const tsif2_clk_groups[] = {
 	"gpio93",
 };
@@ -1885,7 +1910,6 @@
 	FUNCTION(reserved30),
 	FUNCTION(qup11),
 	FUNCTION(qup14),
-	FUNCTION(reserved31),
 	FUNCTION(phase_flag3),
 	FUNCTION(reserved96),
 	FUNCTION(ldo_en),
@@ -1983,6 +2007,7 @@
 	FUNCTION(reserved147),
 	FUNCTION(reserved148),
 	FUNCTION(reserved149),
+	FUNCTION(reserved31),
 	FUNCTION(reserved32),
 	FUNCTION(reserved33),
 	FUNCTION(reserved34),
@@ -2086,15 +2111,6 @@
 	FUNCTION(reserved79),
 	FUNCTION(reserved80),
 	FUNCTION(qup15),
-	FUNCTION(reserved81),
-	FUNCTION(reserved82),
-	FUNCTION(reserved83),
-	FUNCTION(reserved84),
-	FUNCTION(pcie1_pwrfault),
-	FUNCTION(qup5),
-	FUNCTION(reserved85),
-	FUNCTION(pcie1_mrl),
-	FUNCTION(reserved86),
 	FUNCTION(reserved87),
 	FUNCTION(reserved88),
 	FUNCTION(tsif1_clk),
@@ -2119,6 +2135,15 @@
 	FUNCTION(vfr_1),
 	FUNCTION(tgu_ch2),
 	FUNCTION(reserved92),
+	FUNCTION(reserved81),
+	FUNCTION(reserved82),
+	FUNCTION(reserved83),
+	FUNCTION(reserved84),
+	FUNCTION(pcie1_pwrfault),
+	FUNCTION(qup5),
+	FUNCTION(reserved85),
+	FUNCTION(pcie1_mrl),
+	FUNCTION(reserved86),
 	FUNCTION(tsif2_clk),
 	FUNCTION(sdc4_clk),
 	FUNCTION(qup7),
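The new UFS_RESET() macro is used like the other group macros when populating the pin group table; a hypothetical entry is sketched below. The register offset is illustrative rather than the actual msmskunk value, and ufs_reset_pins is assumed to be declared alongside the other pin arrays.

static const struct msm_pingroup example_extra_groups[] = {
	/* hypothetical entry; the real offset comes from the TLMM map */
	UFS_RESET(ufs_reset, 0x19d000),
};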
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 7290eef..f4ce22d 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -54,6 +54,16 @@
 
 	  If unsure, say no.
 
+config REGULATOR_PROXY_CONSUMER
+	bool "Boot time regulator proxy consumer support"
+	help
+	  This driver provides support for boot time regulator proxy requests.
+	  It can enforce a specified voltage range, set a minimum current,
+	  and/or keep a regulator enabled.  It is needed in circumstances where
+	  reducing one or more of these three quantities will cause hardware to
+	  stop working if performed before the driver managing the hardware has
+	  probed.
+
 config REGULATOR_88PM800
 	tristate "Marvell 88PM800 Power regulators"
 	depends on MFD_88PM800
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index faafafa..2e466b1 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
 obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
 obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
+obj-$(CONFIG_REGULATOR_PROXY_CONSUMER) += proxy-consumer.o
 
 obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o
 obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 5c1519b..183fa22 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -27,6 +27,8 @@
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/driver.h>
@@ -222,6 +224,15 @@
 		return -EPERM;
 	}
 
+	/* check if requested voltage range actually overlaps the constraints */
+	if (*max_uV < rdev->constraints->min_uV ||
+	    *min_uV > rdev->constraints->max_uV) {
+		rdev_err(rdev, "requested voltage range [%d, %d] does not fit within constraints: [%d, %d]\n",
+			*min_uV, *max_uV, rdev->constraints->min_uV,
+			rdev->constraints->max_uV);
+		return -EINVAL;
+	}
+
 	if (*max_uV > rdev->constraints->max_uV)
 		*max_uV = rdev->constraints->max_uV;
 	if (*min_uV < rdev->constraints->min_uV)
@@ -243,6 +254,8 @@
 				     int *min_uV, int *max_uV)
 {
 	struct regulator *regulator;
+	int init_min_uV = *min_uV;
+	int init_max_uV = *max_uV;
 
 	list_for_each_entry(regulator, &rdev->consumer_list, list) {
 		/*
@@ -252,6 +265,12 @@
 		if (!regulator->min_uV && !regulator->max_uV)
 			continue;
 
+		if (init_max_uV < regulator->min_uV
+		    || init_min_uV > regulator->max_uV)
+			rdev_err(rdev, "requested voltage range [%d, %d] does not fit within previously voted range: [%d, %d]\n",
+				init_min_uV, init_max_uV, regulator->min_uV,
+				regulator->max_uV);
+
 		if (*max_uV > regulator->max_uV)
 			*max_uV = regulator->max_uV;
 		if (*min_uV < regulator->min_uV)
@@ -660,7 +679,7 @@
 {
 	struct regulator *sibling;
 	int current_uA = 0, output_uV, input_uV, err;
-	unsigned int mode;
+	unsigned int regulator_curr_mode, mode;
 
 	lockdep_assert_held_once(&rdev->mutex);
 
@@ -720,6 +739,14 @@
 				 current_uA, input_uV, output_uV);
 			return err;
 		}
+		/* return if the same mode is requested */
+		if (rdev->desc->ops->get_mode) {
+			regulator_curr_mode = rdev->desc->ops->get_mode(rdev);
+			if (regulator_curr_mode == mode)
+				return 0;
+		} else {
+			return 0;
+		}
 
 		err = rdev->desc->ops->set_mode(rdev, mode);
 		if (err < 0)
@@ -1565,16 +1592,6 @@
 		return ret;
 	}
 
-	/* Cascade always-on state to supply */
-	if (_regulator_is_enabled(rdev)) {
-		ret = regulator_enable(rdev->supply);
-		if (ret < 0) {
-			_regulator_put(rdev->supply);
-			rdev->supply = NULL;
-			return ret;
-		}
-	}
-
 	return 0;
 }
 
@@ -2161,6 +2178,8 @@
 			if (ret < 0)
 				return ret;
 
+			_notifier_call_chain(rdev, REGULATOR_EVENT_ENABLE,
+						NULL);
 		} else if (ret < 0) {
 			rdev_err(rdev, "is_enabled() failed: %d\n", ret);
 			return ret;
@@ -2199,7 +2218,11 @@
 	}
 
 	mutex_lock(&rdev->mutex);
+
 	ret = _regulator_enable(rdev);
+	if (ret == 0)
+		regulator->enabled++;
+
 	mutex_unlock(&rdev->mutex);
 
 	if (ret != 0 && rdev->supply)
@@ -2308,6 +2331,8 @@
 
 	mutex_lock(&rdev->mutex);
 	ret = _regulator_disable(rdev);
+	if (ret == 0)
+		regulator->enabled--;
 	mutex_unlock(&rdev->mutex);
 
 	if (ret == 0 && rdev->supply)
@@ -2618,6 +2643,40 @@
 EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);
 
 /**
+ * regulator_list_corner_voltage - return the maximum voltage in microvolts that
+ *	can be physically configured for the regulator when operating at the
+ *	specified voltage corner
+ * @regulator: regulator source
+ * @corner: voltage corner value
+ * Context: can sleep
+ *
+ * This function can be used for regulators which allow scaling between
+ * different voltage corners rather than between absolute voltages.  The
+ * absolute voltage for a given corner may vary part-to-part or for a given part
+ * at runtime based upon various factors.
+ *
+ * Returns a voltage corresponding to the specified voltage corner or a negative
+ * errno if the corner value can't be used on this system.
+ */
+int regulator_list_corner_voltage(struct regulator *regulator, int corner)
+{
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret;
+
+	if (corner < rdev->constraints->min_uV ||
+	    corner > rdev->constraints->max_uV ||
+	    !rdev->desc->ops->list_corner_voltage)
+		return -EINVAL;
+
+	mutex_lock(&rdev->mutex);
+	ret = rdev->desc->ops->list_corner_voltage(rdev, corner);
+	mutex_unlock(&rdev->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(regulator_list_corner_voltage);
+
+/**
  * regulator_get_linear_step - return the voltage step size between VSEL values
  * @regulator: regulator source
  *
@@ -3426,7 +3485,8 @@
 	if (enable && !regulator->bypass) {
 		rdev->bypass_count++;
 
-		if (rdev->bypass_count == rdev->open_count) {
+		if (rdev->bypass_count == rdev->open_count -
+		    rdev->open_offset) {
 			ret = rdev->desc->ops->set_bypass(rdev, enable);
 			if (ret != 0)
 				rdev->bypass_count--;
@@ -3435,7 +3495,8 @@
 	} else if (!enable && regulator->bypass) {
 		rdev->bypass_count--;
 
-		if (rdev->bypass_count != rdev->open_count) {
+		if (rdev->bypass_count != rdev->open_count -
+		    rdev->open_offset) {
 			ret = rdev->desc->ops->set_bypass(rdev, enable);
 			if (ret != 0)
 				rdev->bypass_count++;
@@ -3863,11 +3924,269 @@
 	.dev_groups = regulator_dev_groups,
 };
 
+#ifdef CONFIG_DEBUG_FS
+
+static int reg_debug_enable_set(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	int ret;
+
+	if (val) {
+		ret = regulator_enable(regulator);
+		if (ret)
+			rdev_err(regulator->rdev, "enable failed, ret=%d\n",
+				ret);
+	} else {
+		ret = regulator_disable(regulator);
+		if (ret)
+			rdev_err(regulator->rdev, "disable failed, ret=%d\n",
+				ret);
+	}
+
+	return ret;
+}
+
+static int reg_debug_enable_get(void *data, u64 *val)
+{
+	struct regulator *regulator = data;
+
+	*val = regulator_is_enabled(regulator);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_enable_fops, reg_debug_enable_get,
+			reg_debug_enable_set, "%llu\n");
+
+static int reg_debug_bypass_enable_get(void *data, u64 *val)
+{
+	struct regulator *regulator = data;
+	struct regulator_dev *rdev = regulator->rdev;
+	bool enable = false;
+	int ret = 0;
+
+	mutex_lock(&rdev->mutex);
+	if (rdev->desc->ops->get_bypass) {
+		ret = rdev->desc->ops->get_bypass(rdev, &enable);
+		if (ret)
+			rdev_err(rdev, "get_bypass() failed, ret=%d\n", ret);
+	} else {
+		enable = (rdev->bypass_count == rdev->open_count
+			  - rdev->open_offset);
+	}
+	mutex_unlock(&rdev->mutex);
+
+	*val = enable;
+
+	return ret;
+}
+
+static int reg_debug_bypass_enable_set(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret = 0;
+
+	mutex_lock(&rdev->mutex);
+	rdev->open_offset = 0;
+	mutex_unlock(&rdev->mutex);
+
+	ret = regulator_allow_bypass(data, val);
+
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_bypass_enable_fops, reg_debug_bypass_enable_get,
+			reg_debug_bypass_enable_set, "%llu\n");
+
+static int reg_debug_force_disable_set(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	int ret = 0;
+
+	if (val > 0) {
+		ret = regulator_force_disable(regulator);
+		if (ret)
+			rdev_err(regulator->rdev, "force_disable failed, ret=%d\n",
+				ret);
+	}
+
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_force_disable_fops, reg_debug_enable_get,
+			reg_debug_force_disable_set, "%llu\n");
+
+#define MAX_DEBUG_BUF_LEN 50
+
+static ssize_t reg_debug_voltage_write(struct file *file,
+			const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct regulator *regulator = file->private_data;
+	char buf[MAX_DEBUG_BUF_LEN];
+	int ret, filled;
+	int min_uV, max_uV = -1;
+
+	if (count < MAX_DEBUG_BUF_LEN) {
+		if (copy_from_user(buf, ubuf, count))
+			return -EFAULT;
+
+		buf[count] = '\0';
+		filled = sscanf(buf, "%d %d", &min_uV, &max_uV);
+
+		/* Check that both min and max voltage were specified. */
+		if (filled < 2 || min_uV < 0 || max_uV < min_uV) {
+			rdev_err(regulator->rdev, "incorrect values specified: \"%s\"; should be: \"min_uV max_uV\"\n",
+				buf);
+			return -EINVAL;
+		}
+
+		ret = regulator_set_voltage(regulator, min_uV, max_uV);
+		if (ret) {
+			rdev_err(regulator->rdev, "set voltage(%d, %d) failed, ret=%d\n",
+				min_uV, max_uV, ret);
+			return ret;
+		}
+	} else {
+		rdev_err(regulator->rdev, "voltage request string exceeds maximum buffer size\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t reg_debug_voltage_read(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	struct regulator *regulator = file->private_data;
+	char buf[MAX_DEBUG_BUF_LEN];
+	int voltage, ret;
+
+	voltage = regulator_get_voltage(regulator);
+
+	ret = snprintf(buf, MAX_DEBUG_BUF_LEN - 1, "%d\n", voltage);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+}
+
+static int reg_debug_voltage_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static const struct file_operations reg_voltage_fops = {
+	.write	= reg_debug_voltage_write,
+	.open   = reg_debug_voltage_open,
+	.read	= reg_debug_voltage_read,
+};
+
+static int reg_debug_mode_set(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	unsigned int mode = val;
+	int ret;
+
+	ret = regulator_set_mode(regulator, mode);
+	if (ret)
+		rdev_err(regulator->rdev, "set mode=%u failed, ret=%d\n",
+			mode, ret);
+
+	return ret;
+}
+
+static int reg_debug_mode_get(void *data, u64 *val)
+{
+	struct regulator *regulator = data;
+	int mode;
+
+	mode = regulator_get_mode(regulator);
+	if (mode < 0) {
+		rdev_err(regulator->rdev, "get mode failed, ret=%d\n", mode);
+		return mode;
+	}
+
+	*val = mode;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_mode_fops, reg_debug_mode_get, reg_debug_mode_set,
+			"%llu\n");
+
+static int reg_debug_set_load(void *data, u64 val)
+{
+	struct regulator *regulator = data;
+	int load = val;
+	int ret;
+
+	ret = regulator_set_load(regulator, load);
+	if (ret)
+		rdev_err(regulator->rdev, "set load=%d failed, ret=%d\n",
+			load, ret);
+
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(reg_set_load_fops, reg_debug_mode_get,
+			reg_debug_set_load, "%llu\n");
+
+static int reg_debug_consumers_show(struct seq_file *m, void *v)
+{
+	struct regulator_dev *rdev = m->private;
+	struct regulator *reg;
+	char *supply_name;
+
+	mutex_lock(&rdev->mutex);
+
+	/* Print a header if there are consumers. */
+	if (rdev->open_count)
+		seq_printf(m, "%-32s EN    Min_uV   Max_uV  load_uA\n",
+			"Device-Supply");
+
+	list_for_each_entry(reg, &rdev->consumer_list, list) {
+		if (reg->supply_name)
+			supply_name = reg->supply_name;
+		else
+			supply_name = "(null)-(null)";
+
+		seq_printf(m, "%-32s %c   %8d %8d %8d\n", supply_name,
+			(reg->enabled ? 'Y' : 'N'), reg->min_uV, reg->max_uV,
+			reg->uA_load);
+	}
+
+	mutex_unlock(&rdev->mutex);
+
+	return 0;
+}
+
+static int reg_debug_consumers_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, reg_debug_consumers_show, inode->i_private);
+}
+
+static const struct file_operations reg_consumers_fops = {
+	.owner		= THIS_MODULE,
+	.open		= reg_debug_consumers_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void rdev_deinit_debugfs(struct regulator_dev *rdev)
+{
+	if (!IS_ERR_OR_NULL(rdev)) {
+		debugfs_remove_recursive(rdev->debugfs);
+		if (rdev->debug_consumer)
+			rdev->debug_consumer->debugfs = NULL;
+		regulator_put(rdev->debug_consumer);
+	}
+}
+
 static void rdev_init_debugfs(struct regulator_dev *rdev)
 {
 	struct device *parent = rdev->dev.parent;
 	const char *rname = rdev_get_name(rdev);
 	char name[NAME_MAX];
+	struct regulator *regulator;
+	const struct regulator_ops *ops;
+	mode_t mode;
 
 	/* Avoid duplicate debugfs directory names */
 	if (parent && rname == rdev->desc->name) {
@@ -3888,8 +4207,75 @@
 			   &rdev->open_count);
 	debugfs_create_u32("bypass_count", 0444, rdev->debugfs,
 			   &rdev->bypass_count);
+	debugfs_create_file("consumers", 0444, rdev->debugfs, rdev,
+			    &reg_consumers_fops);
+
+	regulator = regulator_get(NULL, rdev_get_name(rdev));
+	if (IS_ERR(regulator)) {
+		rdev_err(rdev, "regulator get failed, ret=%ld\n",
+			PTR_ERR(regulator));
+		return;
+	}
+	rdev->debug_consumer = regulator;
+
+	rdev->open_offset = 1;
+	ops = rdev->desc->ops;
+
+	debugfs_create_file("enable", 0644, rdev->debugfs, regulator,
+				&reg_enable_fops);
+	if (ops->set_bypass)
+		debugfs_create_file("bypass", 0644, rdev->debugfs, regulator,
+					&reg_bypass_enable_fops);
+
+	mode = 0;
+	if (ops->is_enabled)
+		mode |= 0444;
+	if (ops->disable)
+		mode |= 0200;
+	if (mode)
+		debugfs_create_file("force_disable", mode, rdev->debugfs,
+					regulator, &reg_force_disable_fops);
+
+	mode = 0;
+	if (ops->get_voltage || ops->get_voltage_sel)
+		mode |= 0444;
+	if (ops->set_voltage || ops->set_voltage_sel)
+		mode |= 0200;
+	if (mode)
+		debugfs_create_file("voltage", mode, rdev->debugfs, regulator,
+					&reg_voltage_fops);
+
+	mode = 0;
+	if (ops->get_mode)
+		mode |= 0444;
+	if (ops->set_mode)
+		mode |= 0200;
+	if (mode)
+		debugfs_create_file("mode", mode, rdev->debugfs, regulator,
+					&reg_mode_fops);
+
+	mode = 0;
+	if (ops->get_mode)
+		mode |= 0444;
+	if (ops->set_load || (ops->get_optimum_mode && ops->set_mode))
+		mode |= 0200;
+	if (mode)
+		debugfs_create_file("load", mode, rdev->debugfs, regulator,
+					&reg_set_load_fops);
 }
 
+#else
+
+static inline void rdev_deinit_debugfs(struct regulator_dev *rdev)
+{
+}
+
+static inline void rdev_init_debugfs(struct regulator_dev *rdev)
+{
+}
+
+#endif
+
 static int regulator_register_resolve_supply(struct device *dev, void *data)
 {
 	struct regulator_dev *rdev = dev_to_rdev(dev);
@@ -4057,6 +4443,8 @@
 
 	dev_set_drvdata(&rdev->dev, rdev);
 	rdev_init_debugfs(rdev);
+	rdev->proxy_consumer = regulator_proxy_consumer_register(dev,
+							config->of_node);
 
 	/* try to resolve regulators supply since a new one was registered */
 	class_for_each_device(&regulator_class, NULL, NULL,
@@ -4096,8 +4484,9 @@
 			regulator_disable(rdev->supply);
 		regulator_put(rdev->supply);
 	}
+	regulator_proxy_consumer_unregister(rdev->proxy_consumer);
+	rdev_deinit_debugfs(rdev);
 	mutex_lock(&regulator_list_mutex);
-	debugfs_remove_recursive(rdev->debugfs);
 	flush_work(&rdev->disable_work.work);
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
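A consumer-side sketch of the regulator_list_corner_voltage() API added above, assuming a handle to a corner-controlled regulator is already held; the helper name is illustrative.

static int example_report_corner_uv(struct regulator *reg, int corner)
{
	int uv = regulator_list_corner_voltage(reg, corner);

	if (uv < 0)
		return uv;	/* corner not usable on this regulator */

	pr_debug("corner %d corresponds to %d uV\n", corner, uv);
	return 0;
}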
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index c74ac87..99c68ab 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -29,6 +29,7 @@
 	int uA_load;
 	int min_uV;
 	int max_uV;
+	int enabled;
 	char *supply_name;
 	struct device_attribute dev_attr;
 	struct regulator_dev *rdev;
diff --git a/drivers/regulator/proxy-consumer.c b/drivers/regulator/proxy-consumer.c
new file mode 100644
index 0000000..99b1959
--- /dev/null
+++ b/drivers/regulator/proxy-consumer.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/proxy-consumer.h>
+
+struct proxy_consumer {
+	struct list_head	list;
+	struct regulator	*reg;
+	bool			enable;
+	int			min_uV;
+	int			max_uV;
+	u32			current_uA;
+};
+
+static DEFINE_MUTEX(proxy_consumer_list_mutex);
+static LIST_HEAD(proxy_consumer_list);
+static bool proxy_consumers_removed;
+
+/**
+ * regulator_proxy_consumer_register() - conditionally register a proxy consumer
+ *		 for the specified regulator and set its boot time parameters
+ * @reg_dev:		Device pointer of the regulator
+ * @reg_node:		Device node pointer of the regulator
+ *
+ * Returns a struct proxy_consumer pointer corresponding to the regulator on
+ * success, ERR_PTR() if an error occurred, or NULL if no proxy consumer is
+ * needed for the regulator.  This function calls
+ * regulator_get(reg_dev, "proxy") after first checking if any proxy consumer
+ * properties are present in the reg_node device node.  After that, the voltage,
+ * minimum current, and/or the enable state will be set based upon the device
+ * node property values.
+ */
+struct proxy_consumer *regulator_proxy_consumer_register(struct device *reg_dev,
+			struct device_node *reg_node)
+{
+	struct proxy_consumer *consumer = NULL;
+	const char *reg_name = "";
+	u32 voltage[2] = {0};
+	int rc;
+
+	/* Return immediately if no proxy consumer properties are specified. */
+	if (!of_find_property(reg_node, "qcom,proxy-consumer-enable", NULL)
+	    && !of_find_property(reg_node, "qcom,proxy-consumer-voltage", NULL)
+	    && !of_find_property(reg_node, "qcom,proxy-consumer-current", NULL))
+		return NULL;
+
+	mutex_lock(&proxy_consumer_list_mutex);
+
+	/* Do not register new consumers if they cannot be removed later. */
+	if (proxy_consumers_removed) {
+		rc = -EPERM;
+		goto unlock;
+	}
+
+	if (dev_name(reg_dev))
+		reg_name = dev_name(reg_dev);
+
+	consumer = kzalloc(sizeof(*consumer), GFP_KERNEL);
+	if (!consumer) {
+		rc = -ENOMEM;
+		goto unlock;
+	}
+
+	consumer->enable
+		= of_property_read_bool(reg_node, "qcom,proxy-consumer-enable");
+	of_property_read_u32(reg_node, "qcom,proxy-consumer-current",
+				&consumer->current_uA);
+	rc = of_property_read_u32_array(reg_node, "qcom,proxy-consumer-voltage",
+					voltage, 2);
+	if (!rc) {
+		consumer->min_uV = voltage[0];
+		consumer->max_uV = voltage[1];
+	}
+
+	dev_dbg(reg_dev, "proxy consumer request: enable=%d, voltage_range=[%d, %d] uV, min_current=%d uA\n",
+		consumer->enable, consumer->min_uV, consumer->max_uV,
+		consumer->current_uA);
+
+	consumer->reg = regulator_get(reg_dev, "proxy");
+	if (IS_ERR_OR_NULL(consumer->reg)) {
+		rc = PTR_ERR(consumer->reg);
+		pr_err("regulator_get() failed for %s, rc=%d\n", reg_name, rc);
+		goto unlock;
+	}
+
+	if (consumer->max_uV > 0 && consumer->min_uV <= consumer->max_uV) {
+		rc = regulator_set_voltage(consumer->reg, consumer->min_uV,
+						consumer->max_uV);
+		if (rc) {
+			pr_err("regulator_set_voltage %s failed, rc=%d\n",
+				reg_name, rc);
+			goto free_regulator;
+		}
+	}
+
+	if (consumer->current_uA > 0) {
+		rc = regulator_set_load(consumer->reg,
+						consumer->current_uA);
+		if (rc < 0) {
+			pr_err("regulator_set_load %s failed, rc=%d\n",
+				reg_name, rc);
+			goto remove_voltage;
+		}
+	}
+
+	if (consumer->enable) {
+		rc = regulator_enable(consumer->reg);
+		if (rc) {
+			pr_err("regulator_enable %s failed, rc=%d\n", reg_name,
+				rc);
+			goto remove_current;
+		}
+	}
+
+	list_add(&consumer->list, &proxy_consumer_list);
+	mutex_unlock(&proxy_consumer_list_mutex);
+
+	return consumer;
+
+remove_current:
+	regulator_set_load(consumer->reg, 0);
+remove_voltage:
+	regulator_set_voltage(consumer->reg, 0, INT_MAX);
+free_regulator:
+	regulator_put(consumer->reg);
+unlock:
+	kfree(consumer);
+	mutex_unlock(&proxy_consumer_list_mutex);
+	return ERR_PTR(rc);
+}
+
+/* proxy_consumer_list_mutex must be held by caller. */
+static int regulator_proxy_consumer_remove(struct proxy_consumer *consumer)
+{
+	int rc = 0;
+
+	if (consumer->enable) {
+		rc = regulator_disable(consumer->reg);
+		if (rc)
+			pr_err("regulator_disable failed, rc=%d\n", rc);
+	}
+
+	if (consumer->current_uA > 0) {
+		rc = regulator_set_load(consumer->reg, 0);
+		if (rc < 0)
+			pr_err("regulator_set_load failed, rc=%d\n",
+				rc);
+	}
+
+	if (consumer->max_uV > 0 && consumer->min_uV <= consumer->max_uV) {
+		rc = regulator_set_voltage(consumer->reg, 0, INT_MAX);
+		if (rc)
+			pr_err("regulator_set_voltage failed, rc=%d\n", rc);
+	}
+
+	regulator_put(consumer->reg);
+	list_del(&consumer->list);
+	kfree(consumer);
+
+	return rc;
+}
+
+/**
+ * regulator_proxy_consumer_unregister() - unregister a proxy consumer and
+ *					   remove its boot time requests
+ * @consumer:		Pointer to proxy_consumer to be removed
+ *
+ * Returns 0 on success or errno on failure.  This function removes all requests
+ * made by the proxy consumer in regulator_proxy_consumer_register() and then
+ * frees the consumer's resources.
+ */
+int regulator_proxy_consumer_unregister(struct proxy_consumer *consumer)
+{
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(consumer))
+		return 0;
+
+	mutex_lock(&proxy_consumer_list_mutex);
+	if (!proxy_consumers_removed)
+		rc = regulator_proxy_consumer_remove(consumer);
+	mutex_unlock(&proxy_consumer_list_mutex);
+
+	return rc;
+}
+
+/*
+ * Remove all proxy requests at late_initcall_sync.  The assumption is that all
+ * devices have probed at this point and made their own regulator requests.
+ */
+static int __init regulator_proxy_consumer_remove_all(void)
+{
+	struct proxy_consumer *consumer;
+	struct proxy_consumer *temp;
+
+	mutex_lock(&proxy_consumer_list_mutex);
+	proxy_consumers_removed = true;
+
+	if (!list_empty(&proxy_consumer_list))
+		pr_info("removing regulator proxy consumer requests\n");
+
+	list_for_each_entry_safe(consumer, temp, &proxy_consumer_list, list) {
+		regulator_proxy_consumer_remove(consumer);
+	}
+	mutex_unlock(&proxy_consumer_list_mutex);
+
+	return 0;
+}
+late_initcall_sync(regulator_proxy_consumer_remove_all);
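
A minimal usage sketch for the API above (not part of this patch): a regulator driver could attach a proxy consumer when its regulator is created and drop it on teardown. The example_attach/detach helpers and their call sites are hypothetical; only regulator_proxy_consumer_register()/_unregister() and the proxy_consumer field added to struct regulator_dev later in this change are real.

/* Hypothetical call sites; register on create, unregister on teardown. */
static int example_attach_proxy(struct regulator_dev *rdev)
{
	rdev->proxy_consumer = regulator_proxy_consumer_register(
					&rdev->dev, rdev->dev.of_node);
	/* NULL (no proxy properties in DT) is a valid, non-error result. */
	return PTR_ERR_OR_ZERO(rdev->proxy_consumer);
}

static void example_detach_proxy(struct regulator_dev *rdev)
{
	regulator_proxy_consumer_unregister(rdev->proxy_consumer);
	rdev->proxy_consumer = NULL;
}
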
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 3a9c7aa..6d92f29 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -190,7 +190,7 @@
 	timeout = jiffies + usecs_to_jiffies(LLCC_STATUS_READ_DELAY);
 	while (time_before(jiffies, timeout)) {
 		regmap_read(drv->llcc_map, status_reg, &slice_status);
-		if (slice_status & status)
+		if (!(slice_status & status))
 			return 0;
 	}
 
@@ -231,7 +231,7 @@
 	act_ctrl_val |= ACT_CTRL_ACT_TRIG;
 
 	rc = llcc_update_act_ctrl(drv, desc->llcc_slice_id, act_ctrl_val,
-				  ACTIVATE);
+				  DEACTIVATE);
 
 	__set_bit(desc->llcc_slice_id, drv->llcc_slice_map);
 	mutex_unlock(&drv->slice_mutex);
@@ -273,7 +273,7 @@
 	act_ctrl_val |= ACT_CTRL_ACT_TRIG;
 
 	rc = llcc_update_act_ctrl(drv, desc->llcc_slice_id, act_ctrl_val,
-				  DEACTIVATE);
+				  ACTIVATE);
 
 	__clear_bit(desc->llcc_slice_id, drv->llcc_slice_map);
 	mutex_unlock(&drv->slice_mutex);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 5ec3a59..6c8154d 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,10 +24,13 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spmi.h>
+#include <linux/syscore_ops.h>
 
 /* PMIC Arbiter configuration registers */
 #define PMIC_ARB_VERSION		0x0000
 #define PMIC_ARB_VERSION_V2_MIN		0x20010000
+#define PMIC_ARB_VERSION_V3_MIN		0x30000000
+#define PMIC_ARB_VERSION_V5_MIN		0x50000000
 #define PMIC_ARB_INT_EN			0x0004
 
 /* PMIC Arbiter channel registers offsets */
@@ -38,7 +41,6 @@
 #define PMIC_ARB_WDATA1			0x14
 #define PMIC_ARB_RDATA0			0x18
 #define PMIC_ARB_RDATA1			0x1C
-#define PMIC_ARB_REG_CHNL(N)		(0x800 + 0x4 * (N))
 
 /* Mapping Table */
 #define SPMI_MAPPING_TABLE_REG(N)	(0x0B00 + (4 * (N)))
@@ -51,6 +53,8 @@
 #define SPMI_MAPPING_TABLE_TREE_DEPTH	16	/* Maximum of 16-bits */
 #define PMIC_ARB_MAX_PPID		BIT(12) /* PPID is 12bit */
 #define PMIC_ARB_CHAN_VALID		BIT(15)
+#define PMIC_ARB_CHAN_IS_IRQ_OWNER(reg)	((reg) & BIT(24))
+#define INVALID_EE			(-1)
 
 /* Ownership Table */
 #define SPMI_OWNERSHIP_TABLE_REG(N)	(0x0700 + (4 * (N)))
@@ -58,10 +62,10 @@
 
 /* Channel Status fields */
 enum pmic_arb_chnl_status {
-	PMIC_ARB_STATUS_DONE	= (1 << 0),
-	PMIC_ARB_STATUS_FAILURE	= (1 << 1),
-	PMIC_ARB_STATUS_DENIED	= (1 << 2),
-	PMIC_ARB_STATUS_DROPPED	= (1 << 3),
+	PMIC_ARB_STATUS_DONE	= BIT(0),
+	PMIC_ARB_STATUS_FAILURE	= BIT(1),
+	PMIC_ARB_STATUS_DENIED	= BIT(2),
+	PMIC_ARB_STATUS_DROPPED	= BIT(3),
 };
 
 /* Command register fields */
@@ -85,6 +89,15 @@
 	PMIC_ARB_OP_ZERO_WRITE = 16,
 };
 
+/*
+ * PMIC arbiter version 5 uses different register offsets for read/write vs
+ * observer channels.
+ */
+enum pmic_arb_channel {
+	PMIC_ARB_CHANNEL_RW,
+	PMIC_ARB_CHANNEL_OBS,
+};
+
 /* Maximum number of support PMIC peripherals */
 #define PMIC_ARB_MAX_PERIPHS		512
 #define PMIC_ARB_TIMEOUT_US		100
@@ -96,14 +109,32 @@
 /* interrupt enable bit */
 #define SPMI_PIC_ACC_ENABLE_BIT		BIT(0)
 
+#define HWIRQ(slave_id, periph_id, irq_id, apid) \
+	((((slave_id) & 0xF)   << 28) | \
+	(((periph_id) & 0xFF)  << 20) | \
+	(((irq_id)    & 0x7)   << 16) | \
+	(((apid)      & 0x1FF) << 0))
+
+#define HWIRQ_SID(hwirq)  (((hwirq) >> 28) & 0xF)
+#define HWIRQ_PER(hwirq)  (((hwirq) >> 20) & 0xFF)
+#define HWIRQ_IRQ(hwirq)  (((hwirq) >> 16) & 0x7)
+#define HWIRQ_APID(hwirq) (((hwirq) >> 0)  & 0x1FF)
+
 struct pmic_arb_ver_ops;
 
+struct apid_data {
+	u16		ppid;
+	u8		write_owner;
+	u8		irq_owner;
+};
+
 /**
- * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
+ * spmi_pmic_arb - SPMI PMIC Arbiter object
  *
  * @rd_base:		on v1 "core", on v2 "observer" register base off DT.
  * @wr_base:		on v1 "core", on v2 "chnls"    register base off DT.
  * @intr:		address of the SPMI interrupt control registers.
+ * @acc_status:		address of SPMI ACC interrupt status registers.
  * @cnfg:		address of the PMIC Arbiter configuration registers.
  * @lock:		lock to synchronize accesses.
  * @channel:		execution environment channel to use for accesses.
@@ -111,18 +142,19 @@
  * @ee:			the current Execution Environment
  * @min_apid:		minimum APID (used for bounding IRQ search)
  * @max_apid:		maximum APID
+ * @max_periph:		maximum number of PMIC peripherals supported by HW.
  * @mapping_table:	in-memory copy of PPID -> APID mapping table.
  * @domain:		irq domain object for PMIC IRQ domain
  * @spmic:		SPMI controller object
- * @apid_to_ppid:	in-memory copy of APID -> PPID mapping table.
  * @ver_ops:		version dependent operations.
- * @ppid_to_chan	in-memory copy of PPID -> channel (APID) mapping table.
+ * @ppid_to_apid:	in-memory copy of PPID -> channel (APID) mapping table.
  *			v2 only.
  */
-struct spmi_pmic_arb_dev {
+struct spmi_pmic_arb {
 	void __iomem		*rd_base;
 	void __iomem		*wr_base;
 	void __iomem		*intr;
+	void __iomem		*acc_status;
 	void __iomem		*cnfg;
 	void __iomem		*core;
 	resource_size_t		core_size;
@@ -132,19 +164,24 @@
 	u8			ee;
 	u16			min_apid;
 	u16			max_apid;
+	u16			max_periph;
 	u32			*mapping_table;
 	DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
 	struct irq_domain	*domain;
 	struct spmi_controller	*spmic;
-	u16			*apid_to_ppid;
 	const struct pmic_arb_ver_ops *ver_ops;
-	u16			*ppid_to_chan;
-	u16			last_channel;
+	u16			*ppid_to_apid;
+	u16			last_apid;
+	struct apid_data	apid_data[PMIC_ARB_MAX_PERIPHS];
 };
+static struct spmi_pmic_arb *the_pa;
 
 /**
  * pmic_arb_ver: version dependent functionality.
  *
+ * @ver_str:		version string.
+ * @ppid_to_apid:	finds the apid for a given ppid.
+ * @mode:		access rights for the specified pmic peripheral.
  * @non_data_cmd:	on v1 issues an spmi non-data command.
  *			on v2 no HW support, returns -EOPNOTSUPP.
  * @offset:		on v1 offset of per-ee channel.
@@ -158,30 +195,37 @@
  *			on v2 offset of SPMI_PIC_IRQ_STATUSn.
  * @irq_clear:		on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
  *			on v2 offset of SPMI_PIC_IRQ_CLEARn.
+ * @channel_map_offset:	offset of PMIC_ARB_REG_CHNLn
  */
 struct pmic_arb_ver_ops {
+	const char *ver_str;
+	int (*ppid_to_apid)(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+			u16 *apid);
+	int (*mode)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
+			mode_t *mode);
 	/* spmi commands (read_cmd, write_cmd, cmd) functionality */
-	int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr,
-		      u32 *offset);
+	int (*offset)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
+			enum pmic_arb_channel ch_type, u32 *offset);
 	u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
 	int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
 	/* Interrupts controller functionality (offset of PIC registers) */
-	u32 (*owner_acc_status)(u8 m, u8 n);
-	u32 (*acc_enable)(u8 n);
-	u32 (*irq_status)(u8 n);
-	u32 (*irq_clear)(u8 n);
+	u32 (*owner_acc_status)(u8 m, u16 n);
+	u32 (*acc_enable)(u16 n);
+	u32 (*irq_status)(u16 n);
+	u32 (*irq_clear)(u16 n);
+	u32 (*channel_map_offset)(u16 n);
 };
 
-static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
+static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
 				       u32 offset, u32 val)
 {
-	writel_relaxed(val, dev->wr_base + offset);
+	writel_relaxed(val, pa->wr_base + offset);
 }
 
-static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
+static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pa,
 				       u32 offset, u32 val)
 {
-	writel_relaxed(val, dev->rd_base + offset);
+	writel_relaxed(val, pa->rd_base + offset);
 }
 
 /**
@@ -190,9 +234,10 @@
  * @reg:	register's address
  * @buf:	output parameter, length must be bc + 1
  */
-static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
+static void pa_read_data(struct spmi_pmic_arb *pa, u8 *buf, u32 reg, u8 bc)
 {
-	u32 data = __raw_readl(dev->rd_base + reg);
+	u32 data = __raw_readl(pa->rd_base + reg);
+
 	memcpy(buf, &data, (bc & 3) + 1);
 }
 
@@ -203,23 +248,25 @@
  * @buf:	buffer to write. length must be bc + 1.
  */
 static void
-pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
+pa_write_data(struct spmi_pmic_arb *pa, const u8 *buf, u32 reg, u8 bc)
 {
 	u32 data = 0;
+
 	memcpy(&data, buf, (bc & 3) + 1);
-	__raw_writel(data, dev->wr_base + reg);
+	pmic_arb_base_write(pa, reg, data);
 }
 
 static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
-				  void __iomem *base, u8 sid, u16 addr)
+				  void __iomem *base, u8 sid, u16 addr,
+				  enum pmic_arb_channel ch_type)
 {
-	struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	u32 status = 0;
 	u32 timeout = PMIC_ARB_TIMEOUT_US;
 	u32 offset;
 	int rc;
 
-	rc = dev->ver_ops->offset(dev, sid, addr, &offset);
+	rc = pa->ver_ops->offset(pa, sid, addr, ch_type, &offset);
 	if (rc)
 		return rc;
 
@@ -264,22 +311,23 @@
 static int
 pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u32 cmd;
 	int rc;
 	u32 offset;
 
-	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset);
+	rc = pa->ver_ops->offset(pa, sid, 0, PMIC_ARB_CHANNEL_RW, &offset);
 	if (rc)
 		return rc;
 
 	cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
 
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0);
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, 0,
+				    PMIC_ARB_CHANNEL_RW);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 
 	return rc;
 }
@@ -293,7 +341,7 @@
 /* Non-data command */
 static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 
 	dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
 
@@ -301,23 +349,35 @@
 	if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
 		return -EINVAL;
 
-	return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
+	return pa->ver_ops->non_data_cmd(ctrl, opc, sid);
 }
 
 static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
 			     u16 addr, u8 *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u8 bc = len - 1;
 	u32 cmd;
 	int rc;
 	u32 offset;
+	mode_t mode;
 
-	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+	rc = pa->ver_ops->offset(pa, sid, addr, PMIC_ARB_CHANNEL_OBS, &offset);
 	if (rc)
 		return rc;
 
+	rc = pa->ver_ops->mode(pa, sid, addr, &mode);
+	if (rc)
+		return rc;
+
+	if (!(mode & 0400)) {
+		dev_err(&pa->spmic->dev,
+			"error: impermissible read from peripheral sid:%d addr:0x%x\n",
+			sid, addr);
+		return -ENODEV;
+	}
+
 	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
 		dev_err(&ctrl->dev,
 			"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
@@ -335,40 +395,52 @@
 	else
 		return -EINVAL;
 
-	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+	cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
 
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr);
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pmic_arb_set_rd_cmd(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->rd_base, sid, addr,
+				    PMIC_ARB_CHANNEL_OBS);
 	if (rc)
 		goto done;
 
-	pa_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
+	pa_read_data(pa, buf, offset + PMIC_ARB_RDATA0,
 		     min_t(u8, bc, 3));
 
 	if (bc > 3)
-		pa_read_data(pmic_arb, buf + 4,
-				offset + PMIC_ARB_RDATA1, bc - 4);
+		pa_read_data(pa, buf + 4, offset + PMIC_ARB_RDATA1, bc - 4);
 
 done:
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 	return rc;
 }
 
 static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
 			      u16 addr, const u8 *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u8 bc = len - 1;
 	u32 cmd;
 	int rc;
 	u32 offset;
+	mode_t mode;
 
-	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+	rc = pa->ver_ops->offset(pa, sid, addr, PMIC_ARB_CHANNEL_RW, &offset);
 	if (rc)
 		return rc;
 
+	rc = pa->ver_ops->mode(pa, sid, addr, &mode);
+	if (rc)
+		return rc;
+
+	if (!(mode & 0200)) {
+		dev_err(&pa->spmic->dev,
+			"error: impermissible write to peripheral sid:%d addr:0x%x\n",
+			sid, addr);
+		return -ENODEV;
+	}
+
 	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
 		dev_err(&ctrl->dev,
 			"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
@@ -388,20 +460,19 @@
 	else
 		return -EINVAL;
 
-	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+	cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
 
 	/* Write data to FIFOs */
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pa_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
-		      min_t(u8, bc, 3));
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pa_write_data(pa, buf, offset + PMIC_ARB_WDATA0, min_t(u8, bc, 3));
 	if (bc > 3)
-		pa_write_data(pmic_arb, buf + 4,
-				offset + PMIC_ARB_WDATA1, bc - 4);
+		pa_write_data(pa, buf + 4, offset + PMIC_ARB_WDATA1, bc - 4);
 
 	/* Start the transaction */
-	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr);
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, addr,
+				    PMIC_ARB_CHANNEL_RW);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 
 	return rc;
 }
@@ -427,9 +498,9 @@
 static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
 			       size_t len)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 sid = d->hwirq >> 24;
-	u8 per = d->hwirq >> 16;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 sid = HWIRQ_SID(d->hwirq);
+	u8 per = HWIRQ_PER(d->hwirq);
 
 	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
 			       (per << 8) + reg, buf, len))
@@ -440,9 +511,9 @@
 
 static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 sid = d->hwirq >> 24;
-	u8 per = d->hwirq >> 16;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 sid = HWIRQ_SID(d->hwirq);
+	u8 per = HWIRQ_PER(d->hwirq);
 
 	if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
 			      (per << 8) + reg, buf, len))
@@ -451,145 +522,175 @@
 				    d->irq);
 }
 
-static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
+static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
+{
+	u16 ppid = pa->apid_data[apid].ppid;
+	u8 sid = ppid >> 8;
+	u8 per = ppid & 0xFF;
+	u8 irq_mask = BIT(id);
+
+	dev_err_ratelimited(&pa->spmic->dev,
+		"cleanup_irq apid=%d sid=0x%x per=0x%x irq=%d\n",
+		apid, sid, per, id);
+	writel_relaxed(irq_mask, pa->intr + pa->ver_ops->irq_clear(apid));
+
+	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+		       (per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
+		dev_err_ratelimited(&pa->spmic->dev,
+				"failed to ack irq_mask = 0x%x for ppid = %x\n",
+				irq_mask, ppid);
+
+	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+			       (per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
+		dev_err_ratelimited(&pa->spmic->dev,
+				"failed to ack irq_mask = 0x%x for ppid = %x\n",
+				irq_mask, ppid);
+}
+
+static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid, bool show)
 {
 	unsigned int irq;
 	u32 status;
 	int id;
+	u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
+	u8 per = pa->apid_data[apid].ppid & 0xFF;
 
 	status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
 	while (status) {
 		id = ffs(status) - 1;
-		status &= ~(1 << id);
-		irq = irq_find_mapping(pa->domain,
-				       pa->apid_to_ppid[apid] << 16
-				     | id << 8
-				     | apid);
-		generic_handle_irq(irq);
+		status &= ~BIT(id);
+		irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
+		if (irq == 0) {
+			cleanup_irq(pa, apid, id);
+			continue;
+		}
+		if (show) {
+			struct irq_desc *desc;
+			const char *name = "null";
+
+			desc = irq_to_desc(irq);
+			if (desc == NULL)
+				name = "stray irq";
+			else if (desc->action && desc->action->name)
+				name = desc->action->name;
+
+			pr_warn("spmi_show_resume_irq: %d triggered [0x%01x, 0x%02x, 0x%01x] %s\n",
+				irq, sid, per, id, name);
+		} else {
+			generic_handle_irq(irq);
+		}
+	}
+}
+
+static void __pmic_arb_chained_irq(struct spmi_pmic_arb *pa, bool show)
+{
+	int first = pa->min_apid >> 5;
+	int last = pa->max_apid >> 5;
+	u32 status, enable;
+	int i, id, apid;
+
+	for (i = first; i <= last; ++i) {
+		status = readl_relaxed(pa->acc_status +
+				      pa->ver_ops->owner_acc_status(pa->ee, i));
+		while (status) {
+			id = ffs(status) - 1;
+			status &= ~BIT(id);
+			apid = id + i * 32;
+			enable = readl_relaxed(pa->intr +
+					pa->ver_ops->acc_enable(apid));
+			if (enable & SPMI_PIC_ACC_ENABLE_BIT)
+				periph_interrupt(pa, apid, show);
+		}
 	}
 }
 
 static void pmic_arb_chained_irq(struct irq_desc *desc)
 {
-	struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
+	struct spmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
-	void __iomem *intr = pa->intr;
-	int first = pa->min_apid >> 5;
-	int last = pa->max_apid >> 5;
-	u32 status;
-	int i, id;
 
 	chained_irq_enter(chip, desc);
-
-	for (i = first; i <= last; ++i) {
-		status = readl_relaxed(intr +
-				      pa->ver_ops->owner_acc_status(pa->ee, i));
-		while (status) {
-			id = ffs(status) - 1;
-			status &= ~(1 << id);
-			periph_interrupt(pa, id + i * 32);
-		}
-	}
-
+	__pmic_arb_chained_irq(pa, false);
 	chained_irq_exit(chip, desc);
 }
 
 static void qpnpint_irq_ack(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u16 apid = HWIRQ_APID(d->hwirq);
 	u8 data;
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	writel_relaxed(1 << irq, pa->intr + pa->ver_ops->irq_clear(apid));
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
+	writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));
 
-	data = 1 << irq;
+	data = BIT(irq);
 	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
 }
 
 static void qpnpint_irq_mask(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
-	u32 status;
-	u8 data;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u8 data = BIT(irq);
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
-	if (status & SPMI_PIC_ACC_ENABLE_BIT) {
-		status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
-		writel_relaxed(status, pa->intr +
-			       pa->ver_ops->acc_enable(apid));
-	}
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
-
-	data = 1 << irq;
 	qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
 }
 
 static void qpnpint_irq_unmask(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
-	u32 status;
-	u8 data;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u16 apid = HWIRQ_APID(d->hwirq);
+	u8 buf[2];
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
-	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
-		writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
-				pa->intr + pa->ver_ops->acc_enable(apid));
+	writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
+		pa->intr + pa->ver_ops->acc_enable(apid));
+
+	qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
+	if (!(buf[0] & BIT(irq))) {
+		/*
+		 * Since the interrupt is currently disabled, write to both the
+		 * LATCHED_CLR and EN_SET registers so that a spurious interrupt
+		 * cannot be triggered when the interrupt is enabled
+		 */
+		buf[0] = BIT(irq);
+		buf[1] = BIT(irq);
+		qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2);
 	}
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
-
-	data = 1 << irq;
-	qpnpint_spmi_write(d, QPNPINT_REG_EN_SET, &data, 1);
-}
-
-static void qpnpint_irq_enable(struct irq_data *d)
-{
-	u8 irq  = d->hwirq >> 8;
-	u8 data;
-
-	qpnpint_irq_unmask(d);
-
-	data = 1 << irq;
-	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
 }
 
 static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
 	struct spmi_pmic_arb_qpnpint_type type;
-	u8 irq = d->hwirq >> 8;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u8 bit_mask_irq = BIT(irq);
 
 	qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
 
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
-		type.type |= 1 << irq;
+		type.type |= bit_mask_irq;
 		if (flow_type & IRQF_TRIGGER_RISING)
-			type.polarity_high |= 1 << irq;
+			type.polarity_high |= bit_mask_irq;
 		if (flow_type & IRQF_TRIGGER_FALLING)
-			type.polarity_low  |= 1 << irq;
+			type.polarity_low  |= bit_mask_irq;
 	} else {
 		if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
 		    (flow_type & (IRQF_TRIGGER_LOW)))
 			return -EINVAL;
 
-		type.type &= ~(1 << irq); /* level trig */
+		type.type &= ~bit_mask_irq; /* level trig */
 		if (flow_type & IRQF_TRIGGER_HIGH)
-			type.polarity_high |= 1 << irq;
+			type.polarity_high |= bit_mask_irq;
 		else
-			type.polarity_low  |= 1 << irq;
+			type.polarity_low  |= bit_mask_irq;
 	}
 
 	qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+
+	if (flow_type & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(d, handle_edge_irq);
+	else
+		irq_set_handler_locked(d, handle_level_irq);
+
 	return 0;
 }
 
@@ -597,7 +698,7 @@
 				     enum irqchip_irq_state which,
 				     bool *state)
 {
-	u8 irq = d->hwirq >> 8;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
 	u8 status = 0;
 
 	if (which != IRQCHIP_STATE_LINE_LEVEL)
@@ -611,7 +712,6 @@
 
 static struct irq_chip pmic_arb_irqchip = {
 	.name		= "pmic_arb",
-	.irq_enable	= qpnpint_irq_enable,
 	.irq_ack	= qpnpint_irq_ack,
 	.irq_mask	= qpnpint_irq_mask,
 	.irq_unmask	= qpnpint_irq_unmask,
@@ -621,48 +721,6 @@
 			| IRQCHIP_SKIP_SET_WAKE,
 };
 
-struct spmi_pmic_arb_irq_spec {
-	unsigned slave:4;
-	unsigned per:8;
-	unsigned irq:3;
-};
-
-static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
-				struct spmi_pmic_arb_irq_spec *spec,
-				u8 *apid)
-{
-	u16 ppid = spec->slave << 8 | spec->per;
-	u32 *mapping_table = pa->mapping_table;
-	int index = 0, i;
-	u32 data;
-
-	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
-		if (!test_and_set_bit(index, pa->mapping_table_valid))
-			mapping_table[index] = readl_relaxed(pa->cnfg +
-						SPMI_MAPPING_TABLE_REG(index));
-
-		data = mapping_table[index];
-
-		if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
-			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
-				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
-			} else {
-				*apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
-				return 0;
-			}
-		} else {
-			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
-				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
-			} else {
-				*apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
-				return 0;
-			}
-		}
-	}
-
-	return -ENODEV;
-}
-
 static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 					   struct device_node *controller,
 					   const u32 *intspec,
@@ -670,10 +728,9 @@
 					   unsigned long *out_hwirq,
 					   unsigned int *out_type)
 {
-	struct spmi_pmic_arb_dev *pa = d->host_data;
-	struct spmi_pmic_arb_irq_spec spec;
-	int err;
-	u8 apid;
+	struct spmi_pmic_arb *pa = d->host_data;
+	int rc;
+	u16 apid;
 
 	dev_dbg(&pa->spmic->dev,
 		"intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
@@ -686,15 +743,21 @@
 	if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
 		return -EINVAL;
 
-	spec.slave = intspec[0];
-	spec.per   = intspec[1];
-	spec.irq   = intspec[2];
+	rc = pa->ver_ops->ppid_to_apid(pa, intspec[0],
+			(intspec[1] << 8), &apid);
+	if (rc < 0) {
+		dev_err(&pa->spmic->dev,
+		"failed to xlate sid = 0x%x, periph = 0x%x, irq = %u rc = %d\n",
+		intspec[0], intspec[1], intspec[2], rc);
+		return rc;
+	}
 
-	err = search_mapping_table(pa, &spec, &apid);
-	if (err)
-		return err;
-
-	pa->apid_to_ppid[apid] = spec.slave << 8 | spec.per;
+	if (pa->apid_data[apid].irq_owner != pa->ee) {
+		dev_err(&pa->spmic->dev, "failed to xlate sid = 0x%x, periph = 0x%x, irq = %u: ee=%u but owner=%u\n",
+			intspec[0], intspec[1], intspec[2], pa->ee,
+			pa->apid_data[apid].irq_owner);
+		return -ENODEV;
+	}
 
 	/* Keep track of {max,min}_apid for bounding search during interrupt */
 	if (apid > pa->max_apid)
@@ -702,10 +765,7 @@
 	if (apid < pa->min_apid)
 		pa->min_apid = apid;
 
-	*out_hwirq = spec.slave << 24
-		   | spec.per   << 16
-		   | spec.irq   << 8
-		   | apid;
+	*out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
 	*out_type  = intspec[3] & IRQ_TYPE_SENSE_MASK;
 
 	dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
@@ -717,7 +777,7 @@
 				  unsigned int virq,
 				  irq_hw_number_t hwirq)
 {
-	struct spmi_pmic_arb_dev *pa = d->host_data;
+	struct spmi_pmic_arb *pa = d->host_data;
 
 	dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
 
@@ -727,26 +787,88 @@
 	return 0;
 }
 
+static int
+pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+{
+	u16 ppid = sid << 8 | ((addr >> 8) & 0xFF);
+	u32 *mapping_table = pa->mapping_table;
+	int index = 0, i;
+	u16 apid_valid;
+	u32 data;
+
+	apid_valid = pa->ppid_to_apid[ppid];
+	if (apid_valid & PMIC_ARB_CHAN_VALID) {
+		*apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+		return 0;
+	}
+
+	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+		if (!test_and_set_bit(index, pa->mapping_table_valid))
+			mapping_table[index] = readl_relaxed(pa->cnfg +
+						SPMI_MAPPING_TABLE_REG(index));
+
+		data = mapping_table[index];
+
+		if (ppid & BIT(SPMI_MAPPING_BIT_INDEX(data))) {
+			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
+				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+			} else {
+				*apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+				pa->ppid_to_apid[ppid]
+					= *apid | PMIC_ARB_CHAN_VALID;
+				pa->apid_data[*apid].ppid = ppid;
+				return 0;
+			}
+		} else {
+			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
+				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+			} else {
+				*apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+				pa->ppid_to_apid[ppid]
+					= *apid | PMIC_ARB_CHAN_VALID;
+				pa->apid_data[*apid].ppid = ppid;
+				return 0;
+			}
+		}
+	}
+
+	return -ENODEV;
+}
+
+static int
+pmic_arb_mode_v1_v3(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+{
+	*mode = 0600;
+	return 0;
+}
+
 /* v1 offset per ee */
 static int
-pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
+pmic_arb_offset_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+		   enum pmic_arb_channel ch_type, u32 *offset)
 {
 	*offset = 0x800 + 0x80 * pa->channel;
 	return 0;
 }
 
-static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
+static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid)
 {
 	u32 regval, offset;
-	u16 chan;
+	u16 apid;
 	u16 id;
 
 	/*
 	 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
-	 * ppid_to_chan is an in-memory invert of that table.
+	 * ppid_to_apid is an in-memory invert of that table.
 	 */
-	for (chan = pa->last_channel; ; chan++) {
-		offset = PMIC_ARB_REG_CHNL(chan);
+	for (apid = pa->last_apid; apid < pa->max_periph; apid++) {
+		regval = readl_relaxed(pa->cnfg +
+				      SPMI_OWNERSHIP_TABLE_REG(apid));
+		pa->apid_data[apid].irq_owner
+			= SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+		pa->apid_data[apid].write_owner = pa->apid_data[apid].irq_owner;
+
+		offset = pa->ver_ops->channel_map_offset(apid);
 		if (offset >= pa->core_size)
 			break;
 
@@ -755,33 +877,168 @@
 			continue;
 
 		id = (regval >> 8) & PMIC_ARB_PPID_MASK;
-		pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID;
+		pa->ppid_to_apid[id] = apid | PMIC_ARB_CHAN_VALID;
+		pa->apid_data[apid].ppid = id;
 		if (id == ppid) {
-			chan |= PMIC_ARB_CHAN_VALID;
+			apid |= PMIC_ARB_CHAN_VALID;
 			break;
 		}
 	}
-	pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID;
+	pa->last_apid = apid & ~PMIC_ARB_CHAN_VALID;
 
-	return chan;
+	return apid;
 }
 
-
-/* v2 offset per ppid (chan) and per ee */
 static int
-pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
+pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
 {
 	u16 ppid = (sid << 8) | (addr >> 8);
-	u16 chan;
+	u16 apid_valid;
 
-	chan = pa->ppid_to_chan[ppid];
-	if (!(chan & PMIC_ARB_CHAN_VALID))
-		chan = pmic_arb_find_chan(pa, ppid);
-	if (!(chan & PMIC_ARB_CHAN_VALID))
+	apid_valid = pa->ppid_to_apid[ppid];
+	if (!(apid_valid & PMIC_ARB_CHAN_VALID))
+		apid_valid = pmic_arb_find_apid(pa, ppid);
+	if (!(apid_valid & PMIC_ARB_CHAN_VALID))
 		return -ENODEV;
-	chan &= ~PMIC_ARB_CHAN_VALID;
 
-	*offset = 0x1000 * pa->ee + 0x8000 * chan;
+	*apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+	return 0;
+}
+
+static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pa)
+{
+	u32 regval, offset;
+	u16 apid, prev_apid, ppid;
+	bool valid, is_irq_owner;
+
+	/*
+	 * PMIC_ARB_REG_CHNL is a table in HW mapping APID (channel) to PPID.
+	 * ppid_to_apid is an in-memory invert of that table.  In order to allow
+	 * multiple EEs to write to a single PPID in arbiter version 5, there
+	 * is more than one APID mapped to each PPID.  The owner field for each
+	 * of these mappings specifies the EE which is allowed to write to the
+	 * APID.  The owner of the last (highest) APID for a given PPID will
+	 * receive interrupts from the PPID.
+	 */
+	for (apid = 0; apid < pa->max_periph; apid++) {
+		offset = pa->ver_ops->channel_map_offset(apid);
+		if (offset >= pa->core_size)
+			break;
+
+		regval = readl_relaxed(pa->core + offset);
+		if (!regval)
+			continue;
+		ppid = (regval >> 8) & PMIC_ARB_PPID_MASK;
+		is_irq_owner = PMIC_ARB_CHAN_IS_IRQ_OWNER(regval);
+
+		regval = readl_relaxed(pa->cnfg +
+				      SPMI_OWNERSHIP_TABLE_REG(apid));
+		pa->apid_data[apid].write_owner
+			= SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+
+		pa->apid_data[apid].irq_owner = is_irq_owner ?
+			pa->apid_data[apid].write_owner : INVALID_EE;
+
+		valid = pa->ppid_to_apid[ppid] & PMIC_ARB_CHAN_VALID;
+		prev_apid = pa->ppid_to_apid[ppid] & ~PMIC_ARB_CHAN_VALID;
+
+		if (valid && is_irq_owner &&
+		    pa->apid_data[prev_apid].write_owner == pa->ee) {
+			/*
+			 * Duplicate PPID mapping after the one for this EE;
+			 * override the irq owner
+			 */
+			pa->apid_data[prev_apid].irq_owner
+				= pa->apid_data[apid].irq_owner;
+		} else if (!valid || is_irq_owner) {
+			/* First PPID mapping or duplicate for another EE */
+			pa->ppid_to_apid[ppid] = apid | PMIC_ARB_CHAN_VALID;
+		}
+
+		pa->apid_data[apid].ppid = ppid;
+		pa->last_apid = apid;
+	}
+
+	/* Dump the mapping table for debug purposes. */
+	dev_dbg(&pa->spmic->dev, "PPID APID Write-EE IRQ-EE\n");
+	for (ppid = 0; ppid < PMIC_ARB_MAX_PPID; ppid++) {
+		valid = pa->ppid_to_apid[ppid] & PMIC_ARB_CHAN_VALID;
+		apid = pa->ppid_to_apid[ppid] & ~PMIC_ARB_CHAN_VALID;
+
+		if (valid)
+			dev_dbg(&pa->spmic->dev, "0x%03X %3u %2u %2u\n",
+				ppid, apid, pa->apid_data[apid].write_owner,
+				pa->apid_data[apid].irq_owner);
+	}
+
+	return 0;
+}
+
+static int
+pmic_arb_ppid_to_apid_v5(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+{
+	u16 ppid = (sid << 8) | (addr >> 8);
+
+	if (!(pa->ppid_to_apid[ppid] & PMIC_ARB_CHAN_VALID))
+		return -ENODEV;
+
+	*apid = pa->ppid_to_apid[ppid] & ~PMIC_ARB_CHAN_VALID;
+
+	return 0;
+}
+
+static int
+pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+{
+	u16 apid;
+	int rc;
+
+	rc = pa->ver_ops->ppid_to_apid(pa, sid, addr, &apid);
+	if (rc < 0)
+		return rc;
+
+	*mode = 0;
+	*mode |= 0400;
+
+	if (pa->ee == pa->apid_data[apid].write_owner)
+		*mode |= 0200;
+	return 0;
+}
+
+/* v2 offset per ppid and per ee */
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+		   enum pmic_arb_channel ch_type, u32 *offset)
+{
+	u16 apid;
+	int rc;
+
+	rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
+	if (rc < 0)
+		return rc;
+
+	*offset = 0x1000 * pa->ee + 0x8000 * apid;
+	return 0;
+}
+
+/*
+ * v5 offset per ee and per apid for observer channels and per apid for
+ * read/write channels.
+ */
+static int
+pmic_arb_offset_v5(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+		   enum pmic_arb_channel ch_type, u32 *offset)
+{
+	u16 apid;
+	int rc;
+
+	rc = pmic_arb_ppid_to_apid_v5(pa, sid, addr, &apid);
+	if (rc < 0)
+		return rc;
+
+	*offset = (ch_type == PMIC_ARB_CHANNEL_OBS)
+			? 0x10000 * pa->ee + 0x80 * apid
+			: 0x10000 * apid;
 	return 0;
 }
 
@@ -795,47 +1052,85 @@
 	return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
 }
 
-static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v1(u8 m, u16 n)
 {
 	return 0x20 * m + 0x4 * n;
 }
 
-static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v2(u8 m, u16 n)
 {
 	return 0x100000 + 0x1000 * m + 0x4 * n;
 }
 
-static u32 pmic_arb_acc_enable_v1(u8 n)
+static u32 pmic_arb_owner_acc_status_v3(u8 m, u16 n)
+{
+	return 0x200000 + 0x1000 * m + 0x4 * n;
+}
+
+static u32 pmic_arb_owner_acc_status_v5(u8 m, u16 n)
+{
+	return 0x10000 * m + 0x4 * n;
+}
+
+static u32 pmic_arb_acc_enable_v1(u16 n)
 {
 	return 0x200 + 0x4 * n;
 }
 
-static u32 pmic_arb_acc_enable_v2(u8 n)
+static u32 pmic_arb_acc_enable_v2(u16 n)
 {
 	return 0x1000 * n;
 }
 
-static u32 pmic_arb_irq_status_v1(u8 n)
+static u32 pmic_arb_acc_enable_v5(u16 n)
+{
+	return 0x100 + 0x10000 * n;
+}
+
+static u32 pmic_arb_irq_status_v1(u16 n)
 {
 	return 0x600 + 0x4 * n;
 }
 
-static u32 pmic_arb_irq_status_v2(u8 n)
+static u32 pmic_arb_irq_status_v2(u16 n)
 {
 	return 0x4 + 0x1000 * n;
 }
 
-static u32 pmic_arb_irq_clear_v1(u8 n)
+static u32 pmic_arb_irq_status_v5(u16 n)
+{
+	return 0x104 + 0x10000 * n;
+}
+
+static u32 pmic_arb_irq_clear_v1(u16 n)
 {
 	return 0xA00 + 0x4 * n;
 }
 
-static u32 pmic_arb_irq_clear_v2(u8 n)
+static u32 pmic_arb_irq_clear_v2(u16 n)
 {
 	return 0x8 + 0x1000 * n;
 }
 
+static u32 pmic_arb_irq_clear_v5(u16 n)
+{
+	return 0x108 + 0x10000 * n;
+}
+
+static u32 pmic_arb_channel_map_offset_v2(u16 n)
+{
+	return 0x800 + 0x4 * n;
+}
+
+static u32 pmic_arb_channel_map_offset_v5(u16 n)
+{
+	return 0x900 + 0x4 * n;
+}
+
 static const struct pmic_arb_ver_ops pmic_arb_v1 = {
+	.ver_str		= "v1",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v1,
+	.mode			= pmic_arb_mode_v1_v3,
 	.non_data_cmd		= pmic_arb_non_data_cmd_v1,
 	.offset			= pmic_arb_offset_v1,
 	.fmt_cmd		= pmic_arb_fmt_cmd_v1,
@@ -843,9 +1138,13 @@
 	.acc_enable		= pmic_arb_acc_enable_v1,
 	.irq_status		= pmic_arb_irq_status_v1,
 	.irq_clear		= pmic_arb_irq_clear_v1,
+	.channel_map_offset	= pmic_arb_channel_map_offset_v2,
 };
 
 static const struct pmic_arb_ver_ops pmic_arb_v2 = {
+	.ver_str		= "v2",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v2,
+	.mode			= pmic_arb_mode_v2,
 	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
 	.offset			= pmic_arb_offset_v2,
 	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
@@ -853,6 +1152,35 @@
 	.acc_enable		= pmic_arb_acc_enable_v2,
 	.irq_status		= pmic_arb_irq_status_v2,
 	.irq_clear		= pmic_arb_irq_clear_v2,
+	.channel_map_offset	= pmic_arb_channel_map_offset_v2,
+};
+
+static const struct pmic_arb_ver_ops pmic_arb_v3 = {
+	.ver_str		= "v3",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v2,
+	.mode			= pmic_arb_mode_v1_v3,
+	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
+	.offset			= pmic_arb_offset_v2,
+	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
+	.owner_acc_status	= pmic_arb_owner_acc_status_v3,
+	.acc_enable		= pmic_arb_acc_enable_v2,
+	.irq_status		= pmic_arb_irq_status_v2,
+	.irq_clear		= pmic_arb_irq_clear_v2,
+	.channel_map_offset	= pmic_arb_channel_map_offset_v2,
+};
+
+static const struct pmic_arb_ver_ops pmic_arb_v5 = {
+	.ver_str		= "v5",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v5,
+	.mode			= pmic_arb_mode_v2,
+	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
+	.offset			= pmic_arb_offset_v5,
+	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
+	.owner_acc_status	= pmic_arb_owner_acc_status_v5,
+	.acc_enable		= pmic_arb_acc_enable_v5,
+	.irq_status		= pmic_arb_irq_status_v5,
+	.irq_clear		= pmic_arb_irq_clear_v5,
+	.channel_map_offset	= pmic_arb_channel_map_offset_v5,
 };
 
 static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
@@ -860,15 +1188,24 @@
 	.xlate	= qpnpint_irq_domain_dt_translate,
 };
 
+static void spmi_pmic_arb_resume(void)
+{
+	if (spmi_show_resume_irq())
+		__pmic_arb_chained_irq(the_pa, true);
+}
+
+static struct syscore_ops spmi_pmic_arb_syscore_ops = {
+	.resume = spmi_pmic_arb_resume,
+};
+
 static int spmi_pmic_arb_probe(struct platform_device *pdev)
 {
-	struct spmi_pmic_arb_dev *pa;
+	struct spmi_pmic_arb *pa;
 	struct spmi_controller *ctrl;
 	struct resource *res;
 	void __iomem *core;
 	u32 channel, ee, hw_ver;
 	int err;
-	bool is_v1;
 
 	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
 	if (!ctrl)
@@ -878,7 +1215,19 @@
 	pa->spmic = ctrl;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+	if (!res) {
+		dev_err(&pdev->dev, "core resource not specified\n");
+		err = -EINVAL;
+		goto err_put_ctrl;
+	}
+
 	pa->core_size = resource_size(res);
+	if (pa->core_size <= 0x800) {
+		dev_err(&pdev->dev, "core_size is smaller than 0x800. Failing Probe\n");
+		err = -EINVAL;
+		goto err_put_ctrl;
+	}
+
 	core = devm_ioremap_resource(&ctrl->dev, res);
 	if (IS_ERR(core)) {
 		err = PTR_ERR(core);
@@ -886,18 +1235,24 @@
 	}
 
 	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
-	is_v1  = (hw_ver < PMIC_ARB_VERSION_V2_MIN);
 
-	dev_info(&ctrl->dev, "PMIC Arb Version-%d (0x%x)\n", (is_v1 ? 1 : 2),
-		hw_ver);
-
-	if (is_v1) {
+	if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
 		pa->ver_ops = &pmic_arb_v1;
 		pa->wr_base = core;
 		pa->rd_base = core;
 	} else {
 		pa->core = core;
-		pa->ver_ops = &pmic_arb_v2;
+
+		if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
+			pa->ver_ops = &pmic_arb_v2;
+		else if (hw_ver < PMIC_ARB_VERSION_V5_MIN)
+			pa->ver_ops = &pmic_arb_v3;
+		else
+			pa->ver_ops = &pmic_arb_v5;
+
+		/* the apid to ppid table starts at PMIC_ARB_REG_CHNL0 */
+		pa->max_periph
+		     = (pa->core_size - pa->ver_ops->channel_map_offset(0)) / 4;
 
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						   "obsrvr");
@@ -915,22 +1270,33 @@
 			goto err_put_ctrl;
 		}
 
-		pa->ppid_to_chan = devm_kcalloc(&ctrl->dev,
+		pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
 						PMIC_ARB_MAX_PPID,
-						sizeof(*pa->ppid_to_chan),
+						sizeof(*pa->ppid_to_apid),
 						GFP_KERNEL);
-		if (!pa->ppid_to_chan) {
+		if (!pa->ppid_to_apid) {
 			err = -ENOMEM;
 			goto err_put_ctrl;
 		}
 	}
 
+	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
+		 pa->ver_ops->ver_str, hw_ver);
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
 	pa->intr = devm_ioremap_resource(&ctrl->dev, res);
 	if (IS_ERR(pa->intr)) {
 		err = PTR_ERR(pa->intr);
 		goto err_put_ctrl;
 	}
+	pa->acc_status = pa->intr;
+
+	/*
+	 * PMIC arbiter v5 groups the IRQ control registers in the same hardware
+	 * module as the read/write channels.
+	 */
+	if (hw_ver >= PMIC_ARB_VERSION_V5_MIN)
+		pa->intr = pa->wr_base;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
 	pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
@@ -974,14 +1340,6 @@
 
 	pa->ee = ee;
 
-	pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
-					    sizeof(*pa->apid_to_ppid),
-					    GFP_KERNEL);
-	if (!pa->apid_to_ppid) {
-		err = -ENOMEM;
-		goto err_put_ctrl;
-	}
-
 	pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
 					sizeof(*pa->mapping_table), GFP_KERNEL);
 	if (!pa->mapping_table) {
@@ -1001,6 +1359,15 @@
 	ctrl->read_cmd = pmic_arb_read_cmd;
 	ctrl->write_cmd = pmic_arb_write_cmd;
 
+	if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) {
+		err = pmic_arb_read_apid_map_v5(pa);
+		if (err) {
+			dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc= %d\n",
+				err);
+			goto err_put_ctrl;
+		}
+	}
+
 	dev_dbg(&pdev->dev, "adding irq domain\n");
 	pa->domain = irq_domain_add_tree(pdev->dev.of_node,
 					 &pmic_arb_irq_domain_ops, pa);
@@ -1011,11 +1378,14 @@
 	}
 
 	irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
+	enable_irq_wake(pa->irq);
 
 	err = spmi_controller_add(ctrl);
 	if (err)
 		goto err_domain_remove;
 
+	the_pa = pa;
+	register_syscore_ops(&spmi_pmic_arb_syscore_ops);
 	return 0;
 
 err_domain_remove:
@@ -1029,9 +1399,12 @@
 static int spmi_pmic_arb_remove(struct platform_device *pdev)
 {
 	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
-	struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+
 	spmi_controller_remove(ctrl);
 	irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
+	unregister_syscore_ops(&spmi_pmic_arb_syscore_ops);
+	the_pa = NULL;
 	irq_domain_remove(pa->domain);
 	spmi_controller_put(ctrl);
 	return 0;
@@ -1051,7 +1424,12 @@
 		.of_match_table = spmi_pmic_arb_match_table,
 	},
 };
-module_platform_driver(spmi_pmic_arb_driver);
+
+int __init spmi_pmic_arb_init(void)
+{
+	return platform_driver_register(&spmi_pmic_arb_driver);
+}
+arch_initcall(spmi_pmic_arb_init);
 
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:spmi_pmic_arb");
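
For reference, the new hwirq layout introduced above packs the slave ID into bits 31:28, the peripheral ID into bits 27:20, the per-peripheral interrupt into bits 18:16 and the APID into bits 8:0. A quick illustration with made-up values (sid 2, peripheral 0x40, irq 3, apid 0x25); the helper function is illustrative only:

/* Illustration only: round-trip through the HWIRQ packing macros. */
static void example_hwirq_roundtrip(void)
{
	unsigned long hwirq = HWIRQ(2, 0x40, 3, 0x25);	/* 0x24030025 */

	WARN_ON(HWIRQ_SID(hwirq)  != 2);
	WARN_ON(HWIRQ_PER(hwirq)  != 0x40);
	WARN_ON(HWIRQ_IRQ(hwirq)  != 3);
	WARN_ON(HWIRQ_APID(hwirq) != 0x25);
}
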
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 2b9b094..a8107cc 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -70,7 +70,7 @@
 	struct spmi_controller *ctrl = sdev->ctrl;
 	int err;
 
-	dev_set_name(&sdev->dev, "%d-%02x", ctrl->nr, sdev->usid);
+	dev_set_name(&sdev->dev, "spmi%d-%02x", ctrl->nr, sdev->usid);
 
 	err = device_add(&sdev->dev);
 	if (err < 0) {
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 6921082..a40c4c9 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -103,6 +103,7 @@
  *                      Data passed is old voltage cast to (void *).
  * PRE_DISABLE    Regulator is about to be disabled
  * ABORT_DISABLE  Regulator disable failed for some reason
+ * ENABLE         Regulator was enabled.
  *
  * NOTE: These events can be OR'ed together when passed into handler.
  */
@@ -119,6 +120,7 @@
 #define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE	0x200
 #define REGULATOR_EVENT_PRE_DISABLE		0x400
 #define REGULATOR_EVENT_ABORT_DISABLE		0x800
+#define REGULATOR_EVENT_ENABLE			0x1000
 
 /**
  * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
@@ -223,6 +225,7 @@
 
 int regulator_count_voltages(struct regulator *regulator);
 int regulator_list_voltage(struct regulator *regulator, unsigned selector);
+int regulator_list_corner_voltage(struct regulator *regulator, int corner);
 int regulator_is_supported_voltage(struct regulator *regulator,
 				   int min_uV, int max_uV);
 unsigned int regulator_get_linear_step(struct regulator *regulator);
@@ -550,6 +553,11 @@
 	return -EINVAL;
 }
 
+static inline int regulator_list_corner_voltage(struct regulator *regulator,
+	int corner)
+{
+	return -EINVAL;
+}
 #endif
 
 static inline int regulator_set_voltage_triplet(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 37b5324..ee79113 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/regulator/consumer.h>
+#include <linux/regulator/proxy-consumer.h>
 
 struct regmap;
 struct regulator_dev;
@@ -87,6 +88,10 @@
  *	if the selector indicates a voltage that is unusable on this system;
  *	or negative errno.  Selectors range from zero to one less than
  *	regulator_desc.n_voltages.  Voltages may be reported in any order.
+ * @list_corner_voltage: Return the maximum voltage in microvolts that
+ *	can be physically configured for the regulator when operating at
+ *	the specified voltage corner or a negative errno if the corner value
+ *	can't be used on this system.
  *
  * @set_current_limit: Configure a limit for a current-limited regulator.
  *                     The driver should select the current closest to max_uA.
@@ -142,6 +147,7 @@
 
 	/* enumerate supported voltages */
 	int (*list_voltage) (struct regulator_dev *, unsigned selector);
+	int (*list_corner_voltage)(struct regulator_dev *, int corner);
 
 	/* get/set regulator voltage */
 	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
@@ -398,6 +404,7 @@
 	int exclusive;
 	u32 use_count;
 	u32 open_count;
+	u32 open_offset;
 	u32 bypass_count;
 
 	/* lists we belong to */
@@ -427,6 +434,8 @@
 
 	/* time when this regulator was disabled last time */
 	unsigned long last_off_jiffy;
+	struct proxy_consumer *proxy_consumer;
+	struct regulator *debug_consumer;
 };
 
 struct regulator_dev *
diff --git a/include/linux/regulator/proxy-consumer.h b/include/linux/regulator/proxy-consumer.h
new file mode 100644
index 0000000..10ba541
--- /dev/null
+++ b/include/linux/regulator/proxy-consumer.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_PROXY_CONSUMER_H_
+#define _LINUX_REGULATOR_PROXY_CONSUMER_H_
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct proxy_consumer;
+
+#ifdef CONFIG_REGULATOR_PROXY_CONSUMER
+
+struct proxy_consumer *regulator_proxy_consumer_register(struct device *reg_dev,
+			struct device_node *reg_node);
+
+int regulator_proxy_consumer_unregister(struct proxy_consumer *consumer);
+
+#else
+
+static inline struct proxy_consumer *regulator_proxy_consumer_register(
+			struct device *reg_dev, struct device_node *reg_node)
+{ return NULL; }
+
+static inline int regulator_proxy_consumer_unregister(
+			struct proxy_consumer *consumer)
+{ return 0; }
+
+#endif
+
+#endif
diff --git a/include/linux/sde_io_util.h b/include/linux/sde_io_util.h
new file mode 100644
index 0000000..6bd5c16
--- /dev/null
+++ b/include/linux/sde_io_util.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_IO_UTIL_H__
+#define __SDE_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...)   pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_io_data {
+	u32 len;
+	void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val)  dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val)     dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset)       dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset)          dss_reg_r(io, offset, true)
+
+enum dss_vreg_type {
+	DSS_REG_LDO,
+	DSS_REG_VS,
+};
+
+struct dss_vreg {
+	struct regulator *vreg; /* vreg handle */
+	char vreg_name[32];
+	int min_voltage;
+	int max_voltage;
+	int enable_load;
+	int disable_load;
+	int pre_on_sleep;
+	int post_on_sleep;
+	int pre_off_sleep;
+	int post_off_sleep;
+};
+
+struct dss_gpio {
+	unsigned int gpio;
+	unsigned int value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+	DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+	unsigned long max_rate;
+};
+
+struct dss_module_power {
+	unsigned int num_vreg;
+	struct dss_vreg *vreg_config;
+	unsigned int num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned int num_clk;
+	struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg,	int enable);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+		       uint8_t reg_offset, uint8_t *read_buf);
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value);
+
+#endif /* __SDE_IO_UTIL_H__ */
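
A minimal bring-up sketch for the helpers declared above, assuming a struct dss_module_power already populated from platform data; the function name and error handling are illustrative only:

/*
 * Illustrative power-on ordering for the sde_io_util helpers:
 * configure and enable regulators first, then acquire, rate-set and
 * enable clocks. "mp" is assumed to be pre-populated by the caller.
 */
static int example_sde_power_on(struct device *dev,
				struct dss_module_power *mp)
{
	int rc;

	rc = msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1);
	if (rc)
		return rc;

	rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 1);
	if (rc)
		goto vreg_deconfig;

	rc = msm_dss_get_clk(dev, mp->clk_config, mp->num_clk);
	if (rc)
		goto vreg_disable;

	rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
	if (rc)
		goto clk_put;

	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 1);
	if (rc)
		goto clk_put;

	return 0;

clk_put:
	msm_dss_put_clk(mp->clk_config, mp->num_clk);
vreg_disable:
	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
vreg_deconfig:
	msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0);
	return rc;
}
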
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 1396a25..319c52a2 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -171,6 +171,19 @@
 	module_driver(__spmi_driver, spmi_driver_register, \
 			spmi_driver_unregister)
 
+#ifdef CONFIG_QCOM_SHOW_RESUME_IRQ
+extern int msm_show_resume_irq_mask;
+static inline bool spmi_show_resume_irq(void)
+{
+	return msm_show_resume_irq_mask;
+}
+#else
+static inline bool spmi_show_resume_irq(void)
+{
+	return false;
+}
+#endif
+
 int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf);
 int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
 			   size_t len);
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index af1fb37..df5b292 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -68,6 +68,8 @@
 	int (*close)(struct snd_pcm_substream *substream);
 	int (*ioctl)(struct snd_pcm_substream * substream,
 		     unsigned int cmd, void *arg);
+	int (*compat_ioctl)(struct snd_pcm_substream *substream,
+		     unsigned int cmd, void *arg);
 	int (*hw_params)(struct snd_pcm_substream *substream,
 			 struct snd_pcm_hw_params *params);
 	int (*hw_free)(struct snd_pcm_substream *substream);
@@ -482,6 +484,7 @@
 #endif /* CONFIG_SND_VERBOSE_PROCFS */
 	/* misc flags */
 	unsigned int hw_opened: 1;
+	unsigned int hw_no_buffer: 1; /* substream may not have a buffer */
 };
 
 #define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f8d3912..b5b820b 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -383,6 +383,10 @@
 #define SND_SOC_COMP_ORDER_LATE		1
 #define SND_SOC_COMP_ORDER_LAST		2
 
+/* DAI Link Host Mode Support */
+#define SND_SOC_DAI_LINK_NO_HOST		0x1
+#define SND_SOC_DAI_LINK_OPT_HOST		0x2
+
 /*
  * Bias levels
  *
@@ -760,6 +764,7 @@
 	unsigned int channels_min;	/* min channels */
 	unsigned int channels_max;	/* max channels */
 	unsigned int sig_bits;		/* number of bits of content */
+	const char *aif_name;		/* DAPM AIF widget name */
 };
 
 /* SoC audio ops */
@@ -1057,6 +1062,12 @@
 	/* This DAI link can route to other DAI links at runtime (Frontend)*/
 	unsigned int dynamic:1;
 
+	/*
+	 * This DAI link can support no-host IO (no PCM data is
+	 * copied to or from the host)
+	 */
+	unsigned int no_host_mode:2;
+
 	/* DPCM capture and Playback support */
 	unsigned int dpcm_capture:1;
 	unsigned int dpcm_playback:1;
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index a65dacff..9ed3a13 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -29,7 +29,7 @@
 
 /**
  * struct drm_msm_pcc - pcc feature structure
- * flags: for enable/disable, read/write or customize operations
+ * flags: for customizing operations
  * r: red coefficients.
  * g: green coefficients.
  * b: blue coefficients.
@@ -42,10 +42,41 @@
 	struct drm_msm_pcc_coeff b;
 };
 
+/* struct drm_msm_pa_vlut - picture adjustment vLUT structure
+ * flags: for customizing vlut operation
+ * val: vLUT values
+ */
 #define PA_VLUT_SIZE 256
 struct drm_msm_pa_vlut {
 	__u64 flags;
 	__u32 val[PA_VLUT_SIZE];
 };
 
+/* struct drm_msm_memcol - Memory color feature structure.
+ *                         Skin, sky, foliage features are supported.
+ * @prot_flags: Bit mask for enabling protection feature.
+ * @color_adjust_p0: Adjustment curve.
+ * @color_adjust_p1: Adjustment curve.
+ * @color_adjust_p2: Adjustment curve.
+ * @blend_gain: Blend gain weightage from other PA features.
+ * @sat_hold: Saturation hold value.
+ * @val_hold: Value hold info.
+ * @hue_region: Hue qualifier.
+ * @sat_region: Saturation qualifier.
+ * @val_region: Value qualifier.
+ */
+#define DRM_MSM_MEMCOL
+struct drm_msm_memcol {
+	__u64 prot_flags;
+	__u32 color_adjust_p0;
+	__u32 color_adjust_p1;
+	__u32 color_adjust_p2;
+	__u32 blend_gain;
+	__u32 sat_hold;
+	__u32 val_hold;
+	__u32 hue_region;
+	__u32 sat_region;
+	__u32 val_region;
+};
+
 #endif /* _MSM_DRM_PP_H_ */
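
Illustrative sketch, not part of this patch: userspace could populate the new memory-color payload before packaging it into the corresponding property blob; the helper name, include path, and values below are placeholders, not documented register settings.

#include <string.h>
#include <drm/msm_drm_pp.h>

/* Hypothetical skin-tone adjustment payload; every value is a placeholder. */
static void example_fill_memcol(struct drm_msm_memcol *m)
{
	memset(m, 0, sizeof(*m));
	m->prot_flags = 0;		/* no protection regions enabled */
	m->color_adjust_p0 = 0x20;	/* adjustment curve points */
	m->color_adjust_p1 = 0x40;
	m->color_adjust_p2 = 0x60;
	m->blend_gain = 0x80;
	m->sat_hold = 0x3;
	m->val_hold = 0x3;
	m->hue_region = 0x7f;		/* hue/sat/val qualifiers */
	m->sat_region = 0x3f;
	m->val_region = 0x3f;
}
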
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index 07a02c7..a25ebe6 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -4,6 +4,32 @@
 /* Total number of supported color planes */
 #define SDE_MAX_PLANES  4
 
+/* Total number of parameterized detail enhancer mapping curves */
+#define SDE_MAX_DE_CURVES 3
+
+/* Y/RGB and UV filter configuration */
+#define FILTER_EDGE_DIRECTED_2D		0x0
+#define FILTER_CIRCULAR_2D		0x1
+#define FILTER_SEPARABLE_1D		0x2
+#define FILTER_BILINEAR			0x3
+
+/* Alpha filters */
+#define FILTER_ALPHA_DROP_REPEAT	0x0
+#define FILTER_ALPHA_BILINEAR		0x1
+#define FILTER_ALPHA_2D			0x3
+
+/* Blend filters */
+#define FILTER_BLEND_CIRCULAR_2D	0x0
+#define FILTER_BLEND_SEPARABLE_1D	0x1
+
+/* LUT configuration flags */
+#define SCALER_LUT_SWAP			0x1
+#define SCALER_LUT_DIR_WR		0x2
+#define SCALER_LUT_Y_CIR_WR		0x4
+#define SCALER_LUT_UV_CIR_WR		0x8
+#define SCALER_LUT_Y_SEP_WR		0x10
+#define SCALER_LUT_UV_SEP_WR		0x20
+
 /**
  * Blend operations for "blend_op" property
  *
@@ -40,38 +66,42 @@
 
 /**
  * struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure
- * @num_pxls_start: Number of start pixels
- * @num_pxls_end:   Number of end pixels
- * @ftch_start:     Number of overfetch start pixels
- * @ftch_end:       Number of overfetch end pixels
- * @rpt_start:      Number of repeat start pixels
- * @rpt_end:        Number of repeat end pixels
- * @roi:            Input ROI settings
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch:       Number of extra pixels to overfetch from left
+ * @right_ftch:      Number of extra pixels to overfetch from right
+ * @top_ftch:        Number of extra lines to overfetch from top
+ * @btm_ftch:        Number of extra lines to overfetch from bottom
+ * @left_rpt:        Number of extra pixels to repeat from left
+ * @right_rpt:       Number of extra pixels to repeat from right
+ * @top_rpt:         Number of extra lines to repeat from top
+ * @btm_rpt:         Number of extra lines to repeat from bottom
  */
 struct sde_drm_pix_ext_v1 {
 	/*
 	 * Number of pixels ext in left, right, top and bottom direction
-	 * for all color components. This pixel value for each color
-	 * component should be sum of fetch + repeat pixels.
+	 * for all color components.
 	 */
-	int32_t num_pxls_start[SDE_MAX_PLANES];
-	int32_t num_pxls_end[SDE_MAX_PLANES];
+	int32_t num_ext_pxls_lr[SDE_MAX_PLANES];
+	int32_t num_ext_pxls_tb[SDE_MAX_PLANES];
 
 	/*
 	 * Number of pixels needs to be overfetched in left, right, top
 	 * and bottom directions from source image for scaling.
 	 */
-	int32_t ftch_start[SDE_MAX_PLANES];
-	int32_t ftch_end[SDE_MAX_PLANES];
-
+	int32_t left_ftch[SDE_MAX_PLANES];
+	int32_t right_ftch[SDE_MAX_PLANES];
+	int32_t top_ftch[SDE_MAX_PLANES];
+	int32_t btm_ftch[SDE_MAX_PLANES];
 	/*
 	 * Number of pixels needs to be repeated in left, right, top and
 	 * bottom directions for scaling.
 	 */
-	int32_t rpt_start[SDE_MAX_PLANES];
-	int32_t rpt_end[SDE_MAX_PLANES];
+	int32_t left_rpt[SDE_MAX_PLANES];
+	int32_t right_rpt[SDE_MAX_PLANES];
+	int32_t top_rpt[SDE_MAX_PLANES];
+	int32_t btm_rpt[SDE_MAX_PLANES];
 
-	uint32_t roi[SDE_MAX_PLANES];
 };
 
 /**
@@ -89,9 +119,7 @@
 	/*
 	 * Pix ext settings
 	 */
-	struct sde_drm_pix_ext_v1 lr;
-	struct sde_drm_pix_ext_v1 tb;
-
+	struct sde_drm_pix_ext_v1 pe;
 	/*
 	 * Phase settings
 	 */
@@ -108,6 +136,122 @@
 	uint32_t vert_filter[SDE_MAX_PLANES];
 };
 
+/**
+ * struct sde_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable:         Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip:           Clip coefficient
+ * @limit:          Detail enhancer limit factor
+ * @thr_quiet:      Quiet zone threshold
+ * @thr_dieout:     Die-out zone threshold
+ * @thr_low:        Linear zone left threshold
+ * @thr_high:       Linear zone right threshold
+ * @prec_shift:     Detail enhancer precision
+ * @adjust_a:       Mapping curves A coefficients
+ * @adjust_b:       Mapping curves B coefficients
+ * @adjust_c:       Mapping curves C coefficients
+ */
+struct sde_drm_de_v1 {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[SDE_MAX_DE_CURVES];
+	int16_t adjust_b[SDE_MAX_DE_CURVES];
+	int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/**
+ * struct sde_drm_scaler_v2 - version 2 of struct sde_drm_scaler
+ * @enable:            Scaler enable
+ * @dir_en:            Detail enhancer enable
+ * @pe:                Pixel extension settings
+ * @horz_decimate:     Horizontal decimation factor
+ * @vert_decimate:     Vertical decimation factor
+ * @init_phase_x:      Initial scaler phase values for x
+ * @phase_step_x:      Phase step values for x
+ * @init_phase_y:      Initial scaler phase values for y
+ * @phase_step_y:      Phase step values for y
+ * @preload_x:         Horizontal preload value
+ * @preload_y:         Vertical preload value
+ * @src_width:         Source width
+ * @src_height:        Source height
+ * @dst_width:         Destination width
+ * @dst_height:        Destination height
+ * @y_rgb_filter_cfg:  Y/RGB plane filter configuration
+ * @uv_filter_cfg:     UV plane filter configuration
+ * @alpha_filter_cfg:  Alpha filter configuration
+ * @blend_cfg:         Selection of blend coefficients
+ * @lut_flag:          LUT configuration flags
+ * @dir_lut_idx:       2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx:    UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx:    UV separable LUT index
+ * @de:                Detail enhancer settings
+ */
+struct sde_drm_scaler_v2 {
+	/*
+	 * General definitions
+	 */
+	uint32_t enable;
+	uint32_t dir_en;
+
+	/*
+	 * Pix ext settings
+	 */
+	struct sde_drm_pix_ext_v1 pe;
+
+	/*
+	 * Decimation settings
+	 */
+	uint32_t horz_decimate;
+	uint32_t vert_decimate;
+
+	/*
+	 * Phase settings
+	 */
+	int32_t init_phase_x[SDE_MAX_PLANES];
+	int32_t phase_step_x[SDE_MAX_PLANES];
+	int32_t init_phase_y[SDE_MAX_PLANES];
+	int32_t phase_step_y[SDE_MAX_PLANES];
+
+	uint32_t preload_x[SDE_MAX_PLANES];
+	uint32_t preload_y[SDE_MAX_PLANES];
+	uint32_t src_width[SDE_MAX_PLANES];
+	uint32_t src_height[SDE_MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes */
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	/*
+	 * Detail enhancer settings
+	 */
+	struct sde_drm_de_v1 de;
+};
+
+
 /*
  * Define constants for struct sde_drm_csc
  */
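
Illustrative sketch, not part of this patch: the v2 scaler blob might be filled like this for a simple 2x upscale with the detail enhancer left disabled; the phase-step constant and helper name are assumptions, not values taken from hardware documentation.

#include <string.h>
#include <stdint.h>
#include <drm/sde_drm.h>

/* Hypothetical 2x upscale: 0x100000 is used as a stand-in for a
 * "half source pixel per destination pixel" phase step; treat every
 * constant below as a placeholder. */
static void example_fill_scaler_v2(struct sde_drm_scaler_v2 *cfg,
				   uint32_t src_w, uint32_t src_h)
{
	int i;

	memset(cfg, 0, sizeof(*cfg));
	cfg->enable = 1;
	cfg->dir_en = 0;			/* detail enhancer path off */
	cfg->dst_width = src_w * 2;
	cfg->dst_height = src_h * 2;
	for (i = 0; i < SDE_MAX_PLANES; i++) {
		cfg->src_width[i] = src_w;
		cfg->src_height[i] = src_h;
		cfg->phase_step_x[i] = 0x100000;
		cfg->phase_step_y[i] = 0x100000;
	}
	cfg->y_rgb_filter_cfg = FILTER_SEPARABLE_1D;
	cfg->uv_filter_cfg = FILTER_SEPARABLE_1D;
	cfg->alpha_filter_cfg = FILTER_ALPHA_BILINEAR;
	cfg->blend_cfg = FILTER_BLEND_SEPARABLE_1D;
}
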
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe20e30..dc545a5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1472,7 +1472,7 @@
 		 * yield - it could be a while.
 		 */
 		if (unlikely(queued)) {
-			ktime_t to = ktime_set(0, NSEC_PER_SEC);
+			ktime_t to = ktime_set(0, NSEC_PER_MSEC);
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e84ec63..45a2b23 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9442,6 +9442,13 @@
 
 	raw_spin_unlock(&busiest_rq->lock);
 
+	if (push_task) {
+		if (push_task_detached)
+			attach_one_task(target_rq, push_task);
+		put_task_struct(push_task);
+		clear_reserved(target_cpu);
+	}
+
 	if (p)
 		attach_one_task(target_rq, p);
 
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 1f64ab0..1867398 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -655,6 +655,25 @@
 #endif /* CONFIG_X86_X32 */
 };
 
+static int snd_compressed_ioctl32(struct snd_pcm_substream *substream,
+				 unsigned int cmd, void __user *arg)
+{
+	struct snd_pcm_runtime *runtime;
+	int err = 0;
+
+	if (PCM_RUNTIME_CHECK(substream))
+		return -ENXIO;
+	runtime = substream->runtime;
+	if (substream->ops->compat_ioctl) {
+		err = substream->ops->compat_ioctl(substream, cmd, arg);
+	} else {
+		err = -ENOIOCTLCMD;
+		pr_err("%s failed cmd = %d\n", __func__, cmd);
+	}
+	pr_debug("%s called with cmd = %d\n", __func__, cmd);
+	return err;
+}
+
 static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct snd_pcm_file *pcm_file;
@@ -734,6 +753,9 @@
 	case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
 		return snd_pcm_ioctl_channel_info_x32(substream, argp);
 #endif /* CONFIG_X86_X32 */
+	default:
+		if (_IOC_TYPE(cmd) == 'C')
+			return snd_compressed_ioctl32(substream, cmd, argp);
 	}
 
 	return -ENOIOCTLCMD;
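
Illustrative sketch, not part of this patch: the fall-through above forwards any ioctl whose _IOC_TYPE is 'C' to the substream's compat_ioctl, so a compressed-audio command defined by a platform driver would reach it; the command below is hypothetical.

#include <linux/ioctl.h>

/* Hypothetical compressed-audio command; only the 'C' type matters for
 * the new dispatch, the number and payload are placeholders. */
struct example_compr_params {
	unsigned int codec_id;
	unsigned int bitrate;
};
#define EXAMPLE_COMPR_SET_PARAMS \
	_IOW('C', 0x10, struct example_compr_params)
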
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index bb12615..294230d 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2115,6 +2115,9 @@
 	struct snd_pcm_runtime *runtime;
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
+	/* TODO: consider returning -EINVAL here */
+	if (substream->hw_no_buffer)
+		snd_printd("%s: warning, this PCM is hostless\n", __func__);
 	runtime = substream->runtime;
 	if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
 		return -EINVAL;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 9d33c1e..f04abc4 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1034,6 +1034,7 @@
 	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
 		return -EBADFD;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+	    !substream->hw_no_buffer &&
 	    !snd_pcm_playback_data(substream))
 		return -EPIPE;
 	runtime->trigger_tstamp_latched = false;
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index bf7b52f..bd3b40b 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -728,6 +728,14 @@
 	rtd->compr = compr;
 	compr->private_data = rtd;
 
+	if (platform->driver->pcm_new) {
+		ret = platform->driver->pcm_new(rtd);
+		if (ret < 0) {
+			pr_err("asoc: compress pcm constructor failed\n");
+			goto compr_err;
+		}
+	}
+
 	printk(KERN_INFO "compress asoc: %s <-> %s mapping ok\n", codec_dai->name,
 		cpu_dai->name);
 	return ret;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index ab07789..00c13d7 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -25,6 +25,7 @@
 #include <linux/workqueue.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -35,6 +36,30 @@
 #define DPCM_MAX_BE_USERS	8
 
 /*
+ * ASoC no host IO hardware.
+ * TODO: fine-tune these values for all hostless transfers.
+ */
+static const struct snd_pcm_hardware no_host_hardware = {
+	.info			= SNDRV_PCM_INFO_MMAP |
+				  SNDRV_PCM_INFO_MMAP_VALID |
+				  SNDRV_PCM_INFO_INTERLEAVED |
+				  SNDRV_PCM_INFO_PAUSE |
+				  SNDRV_PCM_INFO_RESUME,
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE |
+				  SNDRV_PCM_FMTBIT_S32_LE,
+	.period_bytes_min	= PAGE_SIZE >> 2,
+	.period_bytes_max	= PAGE_SIZE >> 1,
+	.periods_min		= 2,
+	.periods_max		= 4,
+	/*
+	 * Increase the max buffer bytes as a single PAGE_SIZE buffer is
+	 * not enough to cover all the scenarios requested by
+	 * userspace.
+	 */
+	.buffer_bytes_max	= PAGE_SIZE * 4,
+};
+
+/*
  * snd_soc_dai_stream_valid() - check if a DAI supports the given stream
  *
  * Returns true if the DAI supports the indicated stream type.
@@ -156,6 +181,8 @@
 	const struct snd_pcm_hardware *hw)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	if (!runtime)
+		return 0;
 	runtime->hw.info = hw->info;
 	runtime->hw.formats = hw->formats;
 	runtime->hw.period_bytes_min = hw->period_bytes_min;
@@ -469,6 +496,9 @@
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
+	if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST)
+		snd_soc_set_runtime_hwparams(substream, &no_host_hardware);
+
 	/* startup the audio subsystem */
 	if (cpu_dai->driver->ops && cpu_dai->driver->ops->startup) {
 		ret = cpu_dai->driver->ops->startup(substream, cpu_dai);
@@ -856,10 +886,31 @@
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
+	/* perform any hw_params fixups */
+	if ((rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) &&
+				rtd->dai_link->be_hw_params_fixup) {
+		ret = rtd->dai_link->be_hw_params_fixup(rtd,
+				params);
+		if (ret < 0)
+			dev_err(rtd->card->dev, "ASoC: fixup failed for %s\n",
+			rtd->dai_link->name);
+	}
+
 	ret = soc_pcm_params_symmetry(substream, params);
 	if (ret)
 		goto out;
 
+	/* perform any hw_params fixups */
+	if ((rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) &&
+				rtd->dai_link->be_hw_params_fixup) {
+		ret = rtd->dai_link->be_hw_params_fixup(rtd,
+				params);
+		if (ret < 0) {
+			dev_err(rtd->card->dev, "ASoC: fixup failed for %s\n",
+			rtd->dai_link->name);
+		}
+	}
+
 	if (rtd->dai_link->ops && rtd->dai_link->ops->hw_params) {
 		ret = rtd->dai_link->ops->hw_params(substream, params);
 		if (ret < 0) {
@@ -930,6 +981,23 @@
 	cpu_dai->sample_bits =
 		snd_pcm_format_physical_width(params_format(params));
 
+	/* malloc a page for hostless IO.
+	 * FIXME: rework with alsa-lib changes so that this malloc is
+	 * not required.
+	 */
+	if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST) {
+		substream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
+		substream->dma_buffer.dev.dev = rtd->dev;
+		substream->dma_buffer.dev.dev->coherent_dma_mask =
+					DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+		substream->dma_buffer.private_data = NULL;
+
+		arch_setup_dma_ops(substream->dma_buffer.dev.dev,
+				   0, 0, NULL, 0);
+		ret = snd_pcm_lib_malloc_pages(substream, PAGE_SIZE);
+		if (ret < 0)
+			goto platform_err;
+	}
 out:
 	mutex_unlock(&rtd->pcm_mutex);
 	return ret;
@@ -1012,6 +1080,8 @@
 	if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free)
 		cpu_dai->driver->ops->hw_free(substream, cpu_dai);
 
+	if (rtd->dai_link->no_host_mode == SND_SOC_DAI_LINK_NO_HOST)
+		snd_pcm_lib_free_pages(substream);
 	mutex_unlock(&rtd->pcm_mutex);
 	return 0;
 }
@@ -2294,6 +2364,18 @@
 	return ret;
 }
 
+static int soc_pcm_compat_ioctl(struct snd_pcm_substream *substream,
+		     unsigned int cmd, void *arg)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_platform *platform = rtd->platform;
+
+	if (platform->driver->ops->compat_ioctl)
+		return platform->driver->ops->compat_ioctl(substream,
+			cmd, arg);
+	return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
 static int soc_pcm_ioctl(struct snd_pcm_substream *substream,
 		     unsigned int cmd, void *arg)
 {
@@ -2653,6 +2735,7 @@
 	struct snd_soc_dai *codec_dai;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct snd_pcm *pcm;
+	struct snd_pcm_str *stream;
 	char new_name[64];
 	int ret = 0, playback = 0, capture = 0;
 	int i;
@@ -2725,6 +2808,22 @@
 		goto out;
 	}
 
+	/* setup any hostless PCMs - i.e. no host IO is performed */
+	if (rtd->dai_link->no_host_mode) {
+		if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+			stream = &pcm->streams[SNDRV_PCM_STREAM_PLAYBACK];
+			stream->substream->hw_no_buffer = 1;
+			snd_soc_set_runtime_hwparams(stream->substream,
+						     &no_host_hardware);
+		}
+		if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+			stream = &pcm->streams[SNDRV_PCM_STREAM_CAPTURE];
+			stream->substream->hw_no_buffer = 1;
+			snd_soc_set_runtime_hwparams(stream->substream,
+						     &no_host_hardware);
+		}
+	}
+
 	/* ASoC PCM operations */
 	if (rtd->dai_link->dynamic) {
 		rtd->ops.open		= dpcm_fe_dai_open;
@@ -2735,6 +2834,7 @@
 		rtd->ops.close		= dpcm_fe_dai_close;
 		rtd->ops.pointer	= soc_pcm_pointer;
 		rtd->ops.ioctl		= soc_pcm_ioctl;
+		rtd->ops.compat_ioctl   = soc_pcm_compat_ioctl;
 	} else {
 		rtd->ops.open		= soc_pcm_open;
 		rtd->ops.hw_params	= soc_pcm_hw_params;
@@ -2744,6 +2844,7 @@
 		rtd->ops.close		= soc_pcm_close;
 		rtd->ops.pointer	= soc_pcm_pointer;
 		rtd->ops.ioctl		= soc_pcm_ioctl;
+		rtd->ops.compat_ioctl   = soc_pcm_compat_ioctl;
 	}
 
 	if (platform->driver->ops) {