Merge "ASoC: Don't close shared back end dailink" into msm-4.8
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 3b5bbe1..fd8fda9 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -18,9 +18,55 @@
 - interrupt-controller: Mark the device node as an interrupt controller.
 - #interrupt-cells: Should be one. The first cell is interrupt number.
 - iommus: Specifies the SID's used by this context bank.
+- qcom,sde-sspp-type:		Array of strings for SDE source surface pipes type information.
+				A source pipe can be "vig", "rgb", "dma" or "cursor" type.
+				The number of types defined should match the number of
+				offsets defined in property: qcom,sde-sspp-off.
+- qcom,sde-sspp-off:		Array of offsets for SDE source surface pipes. The offsets
+				are calculated from register "mdp_phys" defined in
+				reg property + "sde-off". The number of offsets defined here should
+				reflect the number of pipes that can be active in SDE for
+				this configuration.
+- qcom,sde-sspp-xin-id:		Array of VBIF clients ids (xins) corresponding
+				to the respective source pipes. Number of xin ids
+				defined should match the number of offsets
+				defined in property: qcom,sde-sspp-off.
+- qcom,sde-ctl-off:		Array of offset addresses for the available ctl
+				hw blocks within SDE. These offsets are
+				calculated from register "mdp_phys" defined in
+				reg property. The number of ctl offsets defined
+				here should reflect the number of control paths
+				that can be configured concurrently on SDE for
+				this configuration.
+- qcom,sde-wb-off:		Array of offset addresses for the programmable
+				writeback blocks within SDE.
+- qcom,sde-wb-xin-id:		Array of VBIF clients ids (xins) corresponding
+				to the respective writeback. Number of xin ids
+				defined should match the number of offsets
+				defined in property: qcom,sde-wb-off.
+- qcom,sde-mixer-off:		Array of offset addresses for the available
+				mixer blocks that can drive data to panel
+				interfaces. These offsets are calculated from
+				register "mdp_phys" defined in reg property.
+				The number of offsets defined should reflect the
+				number of mixers that can drive data to a panel
+				interface.
+- qcom,sde-dspp-off: 		Array of offset addresses for the available dspp
+				blocks. These offsets are calculated from
+				register "mdp_phys" defined in reg property.
+- qcom,sde-pp-off:		Array of offset addresses for the available
+				pingpong blocks. These offsets are calculated
+				from register "mdp_phys" defined in reg property.
+- qcom,sde-intf-off:		Array of offset addresses for the available SDE
+				interface blocks that can drive data to a
+				panel controller. The offsets are calculated
+				from "mdp_phys" defined in reg property. The number
+				of offsets defined should reflect the number of
+				programmable interface blocks available in hardware.
 
 Optional properties:
 - clock-rate:		List of clock rates in Hz.
+- clock-max-rate:	List of maximum clock rates in Hz that this device supports.
 - qcom,platform-supply-entries:	A node that lists the elements of the supply. There
 				can be more than one instance of this binding,
 				in which case the entry would be appended with
@@ -38,11 +84,119 @@
 				-- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
 - qcom,sde-reg-bus:		Property to provide Bus scaling for register access for
 				mdss blocks.
-
-
+- qcom,sde-sspp-src-size:	A u32 value indicates the address range for each sspp.
+- qcom,sde-mixer-size:		A u32 value indicates the address range for each mixer.
+- qcom,sde-ctl-size:		A u32 value indicates the address range for each ctl.
+- qcom,sde-dspp-size:		A u32 value indicates the address range for each dspp.
+- qcom,sde-intf-size:		A u32 value indicates the address range for each intf.
+- qcom,sde-dsc-size:		A u32 value indicates the address range for each dsc.
+- qcom,sde-cdm-size:		A u32 value indicates the address range for each cdm.
+- qcom,sde-pp-size:		A u32 value indicates the address range for each pingpong.
+- qcom,sde-wb-size:		A u32 value indicates the address range for each writeback.
+- qcom,sde-len:			A u32 entry for SDE address range.
+- qcom,sde-intf-max-prefetch-lines:	Array of u32 values for max prefetch lines on
+				each interface.
+- qcom,sde-sspp-linewidth:	A u32 value indicates the max sspp line width.
+- qcom,sde-mixer-linewidth:	A u32 value indicates the max mixer line width.
+- qcom,sde-wb-linewidth:	A u32 value indicates the max writeback line width.
+- qcom,sde-sspp-scale-size:	A u32 value indicates the scaling block size on sspp.
+- qcom,sde-mixer-blendstages:	A u32 value indicates the max mixer blend stages for
+				alpha blending.
+- qcom,sde-qseed-type:		A string entry indicates qseed support on sspp and wb.
+				It supports "qseedv3" and "qseedv2" entries for qseed
+				type. By default "qseedv2" is used if this optional property
+				is not defined.
+- qcom,sde-highest-bank-bit:	A u32 property to indicate GPU/Camera/Video highest memory
+				bank bit used for tile format buffers.
+- qcom,sde-panic-per-pipe:	Boolean property to indicate if panic signal
+				control feature is available on each source pipe.
+- qcom,sde-has-src-split:	Boolean property to indicate if source split
+				feature is available or not.
+- qcom,sde-has-mixer-gc:	Boolean property to indicate if mixer has gamma correction
+				feature available or not.
+- qcom,sde-has-cdp:		Boolean property to indicate if cdp feature is
+				available or not.
+- qcom,sde-sspp-clk-ctrl:	Array of 2 cell entries describing clk control
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				the control register. Number of entries defined should
+				match the number of offsets defined in
+				property: qcom,sde-sspp-off.
+- qcom,sde-sspp-clk-status:	Array of 2 cell entries describing clk status
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the status
+				register. 2nd value represents bit offset within
+				the status register. Number of entries defined should
+				match the number of offsets defined in
+				property: qcom,sde-sspp-off.
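+				For example, an entry of <0x2ac 0> in either
+				property refers to bit 0 of the register at
+				offset 0x2ac, relative to "mdp_phys" + qcom,sde-off.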
+- qcom,sde-sspp-danger-lut:	A 3 cell property, with a format of <linear, tile, nrt>,
+				indicating the danger luts on sspp.
+- qcom,sde-sspp-safe-lut:	A 3 cell property, with a format of <linear, tile, nrt>,
+				indicating the safe luts on sspp.
+- qcom,sde-sspp-qseed-off:	A u32 offset value indicates the qseed block offset
+				from sspp base. It will install qseed property on
+				vig and rgb sspp pipes.
+- qcom,sde-sspp-csc-off:	A u32 offset value indicates the csc block offset
+				from sspp base. It will be used to install the csc
+				property on vig type pipe.
+- qcom,sde-sspp-max-rects:	Array of u32 values indicating maximum rectangles supported
+				on each sspp. This property is for multirect feature support.
+				Number of offsets defined should match the number of
+				offsets defined in property: qcom,sde-sspp-off.
+- qcom,sde-intf-type:		Array of strings providing the interface type information.
+				Possible string values
+					"dsi" - dsi display interface
+					"dp" - Display Port interface
+					"hdmi" - HDMI display interface
+				An interface is considered as "none" if interface type
+				is not defined.
+- qcom,sde-off:			SDE offset from "mdp_phys" defined in reg property.
+- qcom,sde-cdm-off:	 	Array of offset addresses for the available
+				cdm blocks. These offsets will be calculated from
+				register "mdp_phys" defined in reg property.
+- qcom,sde-vbif-off:		Array of offset addresses for the available
+				vbif blocks. These offsets will be calculated from
+				register "vbif_phys" defined in reg property.
+- qcom,sde-vbif-size:		A u32 value indicates the vbif block address range.
+- qcom,sde-te-off:		A u32 offset indicates the te block offset on pingpong.
+				This offset is 0x0 by default.
+- qcom,sde-te2-off:		A u32 offset indicates the te2 block offset on pingpong.
+- qcom,sde-te-size:		A u32 value indicates the te block address range.
+- qcom,sde-te2-size:		A u32 value indicates the te2 block address range.
+- qcom,sde-dsc-off:	 	A u32 offset indicates the dsc block offset on pingpong.
+- qcom,sde-dspp-igc-off:	A u32 offset indicates the igc block offset on dspp.
+- qcom,sde-dspp-pcc-off:	A u32 offset indicates the pcc block offset on dspp.
+- qcom,sde-dspp-gc-off:		A u32 offset indicates the gc block offset on dspp.
+- qcom,sde-dspp-pa-off:		A u32 offset indicates the pa block offset on dspp.
+- qcom,sde-dspp-gamut-off:	A u32 offset indicates the gamut block offset on dspp.
+- qcom,sde-dspp-dither-off:	A u32 offset indicates the dither block offset on dspp.
+- qcom,sde-dspp-hist-off:	A u32 offset indicates the hist block offset on dspp.
+- qcom,sde-dspp-ad-off:		A u32 offset indicates the ad block offset on dspp.
+- qcom,sde-vbif-id:		Array of vbif ids corresponding to the
+				offsets defined in property: qcom,sde-vbif-off.
+- qcom,sde-vbif-default-ot-rd-limit:	A u32 value indicates the default read OT limit.
+- qcom,sde-vbif-default-ot-wr-limit:	A u32 value indicates the default write OT limit.
+- qcom,sde-vbif-dynamic-ot-rd-limit:	A series of 2 cell entries, with a format
+				of <pps, OT limit>, where pps is pixels per second and
+				OT limit is the read limit to apply if the given
+				pps is not exceeded.
+- qcom,sde-vbif-dynamic-ot-wr-limit:	A series of 2 cell entries, with a format
+				of <pps, OT limit>, where pps is pixels per second and
+				OT limit is the write limit to apply if the given
+				pps is not exceeded.
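+				For example, an entry of <62208000 2> applies an
+				OT limit of 2 for pixel rates up to 62208000
+				pixels per second.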
+- qcom,sde-wb-id:		Array of writeback ids corresponding to the
+				offsets defined in property: qcom,sde-wb-off.
+- qcom,sde-wb-clk-ctrl:		Array of 2 cell entries describing clk control
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				the control register. Number of entries defined should
+				match the number of offsets defined in
+				property: qcom,sde-wb-off.
 Bus Scaling Data:
 - qcom,msm-bus,name:		String property describing client name.
-- qcom,msm-bus,num-cases:	This is the the number of Bus Scaling use cases
+- qcom,msm-bus,num-cases:	This is the number of Bus Scaling use cases
 				defined in the vectors property.
 - qcom,msm-bus,num-paths:	This represents the number of paths in each
 				Bus Scaling Usecase.
@@ -90,6 +244,7 @@
       "mmagic_clk",
       "vsync_clk";
     clock-rate = <0>, <0>, <0>;
+    clock-max-rate = <0 320000000 0>;
     mmagic-supply = <&gdsc_mmagic_mdss>;
     vdd-supply = <&gdsc_mdss>;
     interrupt-parent = <&intc>;
@@ -98,6 +253,104 @@
     #interrupt-cells = <1>;
     iommus = <&mdp_smmu 0>;
 
+    qcom,sde-off = <0x1000>;
+    qcom,sde-ctl-off = <0x00002000 0x00002200 0x00002400
+		     0x00002600 0x00002800>;
+    qcom,sde-mixer-off = <0x00045000 0x00046000
+			0x00047000 0x0004a000>;
+    qcom,sde-dspp-off = <0x00055000 0x00057000>;
+    qcom,sde-wb-off = <0x00066000>;
+    qcom,sde-wb-xin-id = <6>;
+    qcom,sde-intf-off = <0x0006b000 0x0006b800
+			0x0006c000 0x0006c800>;
+    qcom,sde-intf-type = "none", "dsi", "dsi", "hdmi";
+    qcom,sde-pp-off = <0x00071000 0x00071800
+			  0x00072000 0x00072800>;
+    qcom,sde-cdm-off = <0x0007a200>;
+    qcom,sde-dsc-off = <0x00081000 0x00081400>;
+    qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
+
+    qcom,sde-sspp-type = "vig", "vig", "vig",
+			      "vig", "rgb", "rgb",
+			      "rgb", "rgb", "dma",
+			      "dma", "cursor", "cursor";
+
+    qcom,sde-sspp-off = <0x00005000 0x00007000 0x00009000
+		      0x0000b000 0x00015000 0x00017000
+		      0x00019000 0x0001b000 0x00025000
+		      0x00027000 0x00035000 0x00037000>;
+
+    qcom,sde-sspp-xin-id = <0 4 8
+			12 1 5
+			9 13 2
+			10 7 7>;
+
+    /* offsets are relative to "mdp_phys" + qcom,sde-off */
+    qcom,sde-sspp-clk-ctrl = <0x2ac 0>, <0x2b4 0>, <0x2bc 0>,
+			  <0x2c4 0>, <0x2ac 4>, <0x2b4 4>, <0x2bc 4>,
+			  <0x2c4 4>, <0x2ac 8>, <0x2b4 8>, <0x3a8 16>,
+			  <0x3b0 16>;
+    qcom,sde-sspp-clk-status = <0x2ac 0>, <0x2b4 0>, <0x2bc 0>,
+			  <0x2c4 0>, <0x2ac 4>, <0x2b4 4>, <0x2bc 4>,
+			  <0x2c4 4>, <0x2ac 8>, <0x2b4 8>, <0x3a8 16>,
+			  <0x3b0 16>;
+    qcom,sde-mixer-linewidth = <2560>;
+    qcom,sde-sspp-linewidth = <2560>;
+    qcom,sde-mixer-blendstages = <0x7>;
+    qcom,sde-highest-bank-bit = <0x2>;
+    qcom,sde-panic-per-pipe;
+    qcom,sde-has-cdp;
+    qcom,sde-has-src-split;
+    qcom,sde-sspp-src-size = <0x100>;
+    qcom,sde-mixer-size = <0x100>;
+    qcom,sde-ctl-size = <0x100>;
+    qcom,sde-dspp-size = <0x100>;
+    qcom,sde-intf-size = <0x100>;
+    qcom,sde-dsc-size = <0x100>;
+    qcom,sde-cdm-size = <0x100>;
+    qcom,sde-pp-size = <0x100>;
+    qcom,sde-wb-size = <0x100>;
+    qcom,sde-len = <0x100>;
+    qcom,sde-wb-linewidth = <2560>;
+    qcom,sde-sspp-scale-size = <0x100>;
+    qcom,sde-mixer-blendstages = <0x8>;
+    qcom,sde-qseed-type = "qseedv2";
+    qcom,sde-highest-bank-bit = <15>;
+    qcom,sde-has-mixer-gc;
+    qcom,sde-sspp-max-rects = <1 1 1 1
+				1 1 1 1
+				1 1
+				1 1>;
+    qcom,sde-te-off = <0x100>;
+    qcom,sde-te2-off = <0x100>;
+    qcom,sde-te-size = <0xffff>;
+    qcom,sde-te2-size = <0xffff>;
+    qcom,sde-sspp-qseed-off = <0x100>;
+    qcom,sde-sspp-csc-off = <0x100>;
+    qcom,sde-dspp-igc-off = <0x100>;
+    qcom,sde-dspp-pcc-off = <0x100>;
+    qcom,sde-dspp-gc-off = <0x100>;
+    qcom,sde-dspp-pa-off = <0x100>;
+    qcom,sde-dspp-gamut-off = <0x100>;
+    qcom,sde-dspp-dither-off = <0x100>;
+    qcom,sde-dspp-hist-off = <0x100>;
+    qcom,sde-dspp-ad-off = <0x100>;
+
+    qcom,sde-wb-id = <2>;
+    qcom,sde-wb-clk-ctrl = <0x2bc 16>;
+
+    qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
+    qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+
+    qcom,sde-vbif-off = <0 0>;
+    qcom,sde-vbif-id = <0 1>;
+    qcom,sde-vbif-default-ot-rd-limit = <32>;
+    qcom,sde-vbif-default-ot-wr-limit = <16>;
+    qcom,sde-vbif-dynamic-ot-rd-limit = <62208000 2>,
+        <124416000 4>, <248832000 16>;
+    qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
+        <124416000 4>, <248832000 16>;
+
     qcom,platform-supply-entries {
        #address-cells = <1>;
        #size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
index c15a508..dd53a87 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk.dtsi
+++ b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -690,9 +690,9 @@
 
 		interrupts = <0 266 1>;
 		vdd_cx-supply = <&pm8998_s9_level>;
-		vdd_cx-voltage = <RPMH_REGULATOR_LEVEL_MAX>;
+		vdd_cx-voltage = <RPMH_REGULATOR_LEVEL_TURBO>;
 		vdd_mx-supply = <&pm8998_s6_level>;
-		vdd_mx-uV = <RPMH_REGULATOR_LEVEL_MAX>;
+		vdd_mx-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
 		qcom,firmware-name = "modem";
 		qcom,pil-self-auth;
 		qcom,sysmon-id = <0>;
@@ -721,7 +721,7 @@
 
 		vdd_cx-supply = <&pm8998_s9_level>;
 		qcom,proxy-reg-names = "vdd_cx";
-		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_MAX 100000>;
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
 
 		clocks = <&clock_gcc RPMH_CXO_CLK>;
 		clock-names = "xo";
@@ -753,7 +753,7 @@
 
 		vdd_cx-supply = <&pm8998_l27_level>;
 		vdd_px-supply = <&pm8998_lvs2>;
-		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_MAX 0>;
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
 		qcom,proxy-reg-names = "vdd_cx", "vdd_px";
 		qcom,keep-proxy-regs-on;
 
@@ -802,9 +802,9 @@
 
 		vdd_cx-supply = <&pm8998_s9_level>;
 		qcom,proxy-reg-names = "vdd_cx";
-		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_MAX 100000>;
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
 		vdd_mx-supply = <&pm8998_s6_level>;
-		vdd_mx-uV = <RPMH_REGULATOR_LEVEL_MAX 100000>;
+		vdd_mx-uV = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
 
 		clocks = <&clock_gcc RPMH_CXO_CLK>;
 		clock-names = "xo";
@@ -837,7 +837,7 @@
 
 		vdd_cx-supply = <&pm8998_s9_level>;
 		qcom,proxy-reg-names = "vdd_cx";
-		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_MAX 100000>;
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
 
 		clocks = <&clock_gcc RPMH_CXO_CLK>;
 		clock-names = "xo";
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index 354be2a..b08c4eb 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -27,21 +27,21 @@
 {
 	while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
 			 !IS_ALIGNED((unsigned long)to, 8))) {
-		*(u8 *)to = __raw_readb(from);
+		*(u8 *)to = __raw_readb_no_log(from);
 		from++;
 		to++;
 		count--;
 	}
 
 	while (count >= 8) {
-		*(u64 *)to = __raw_readq(from);
+		*(u64 *)to = __raw_readq_no_log(from);
 		from += 8;
 		to += 8;
 		count -= 8;
 	}
 
 	while (count) {
-		*(u8 *)to = __raw_readb(from);
+		*(u8 *)to = __raw_readb_no_log(from);
 		from++;
 		to++;
 		count--;
@@ -56,21 +56,21 @@
 {
 	while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
 			 !IS_ALIGNED((unsigned long)from, 8))) {
-		__raw_writeb(*(volatile u8 *)from, to);
+		__raw_writeb_no_log(*(volatile u8 *)from, to);
 		from++;
 		to++;
 		count--;
 	}
 
 	while (count >= 8) {
-		__raw_writeq(*(volatile u64 *)from, to);
+		__raw_writeq_no_log(*(volatile u64 *)from, to);
 		from += 8;
 		to += 8;
 		count -= 8;
 	}
 
 	while (count) {
-		__raw_writeb(*(volatile u8 *)from, to);
+		__raw_writeb_no_log(*(volatile u8 *)from, to);
 		from++;
 		to++;
 		count--;
@@ -90,19 +90,19 @@
 	qc |= qc << 32;
 
 	while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
-		__raw_writeb(c, dst);
+		__raw_writeb_no_log(c, dst);
 		dst++;
 		count--;
 	}
 
 	while (count >= 8) {
-		__raw_writeq(qc, dst);
+		__raw_writeq_no_log(qc, dst);
 		dst += 8;
 		count -= 8;
 	}
 
 	while (count) {
-		__raw_writeb(c, dst);
+		__raw_writeb_no_log(c, dst);
 		dst++;
 		count--;
 	}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 22d1760..958e255 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -294,7 +294,8 @@
 	"/lib/firmware/updates/" UTS_RELEASE,
 	"/lib/firmware/updates",
 	"/lib/firmware/" UTS_RELEASE,
-	"/lib/firmware"
+	"/lib/firmware",
+	"/firmware/image"
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5c3dbd0..26b6bc8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -5,7 +5,7 @@
 ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
 ccflags-y += -Idrivers/gpu/drm/msm/sde
 
-msm-y := \
+msm_drm-y := \
 	hdmi/hdmi.o \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
@@ -43,6 +43,8 @@
 	sde/sde_kms.o \
 	sde/sde_plane.o \
 	sde/sde_connector.o \
+	sde/sde_color_processing.o \
+	sde/sde_vbif.o \
 	msm_atomic.o \
 	msm_debugfs.o \
 	msm_drv.o \
@@ -64,13 +66,13 @@
 
 # use drm gpu driver only if qcom_kgsl driver not available
 ifneq ($(CONFIG_QCOM_KGSL),y)
-msm-y += adreno/adreno_device.o \
+msm_drm-y += adreno/adreno_device.o \
 	adreno/adreno_gpu.o \
 	adreno/a3xx_gpu.o \
 	adreno/a4xx_gpu.o
 endif
 
-msm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
+msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
 	mdp/mdp4/mdp4_dtv_encoder.o \
 	mdp/mdp4/mdp4_lcdc_encoder.o \
 	mdp/mdp4/mdp4_lvds_connector.o \
@@ -78,15 +80,15 @@
 	mdp/mdp4/mdp4_kms.o \
 	mdp/mdp4/mdp4_plane.o
 
-msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_SYNC) += sde/sde_fence.o
-msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
+msm_drm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+msm_drm-$(CONFIG_SYNC) += sde/sde_fence.o
+msm_drm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm_drm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
+msm_drm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
 
-msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
+msm_drm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
 
-msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
 			mdp/mdp4/mdp4_dsi_encoder.o \
 			dsi/dsi_cfg.o \
 			dsi/dsi_host.o \
@@ -95,16 +97,16 @@
 			dsi/dsi_manager.o \
 			mdp/mdp5/mdp5_cmd_encoder.o
 
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
-msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
 
 ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
-msm-y += dsi/pll/dsi_pll.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
+msm_drm-y += dsi/pll/dsi_pll.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
 endif
-msm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
+msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
 				dsi-staging/dsi_clk_pwr.o \
 				dsi-staging/dsi_phy.o \
 				dsi-staging/dsi_phy_hw_v4_0.o \
@@ -116,10 +118,9 @@
 				dsi-staging/dsi_panel.o \
 				dsi-staging/dsi_display_test.o
 
-obj-$(CONFIG_DRM_MSM)	+= msm.o
+obj-$(CONFIG_DRM_MSM)	+= msm_drm.o
 
 obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \
-	sde/sde_hw_catalog_8996.o \
 	sde/sde_hw_cdm.o \
 	sde/sde_hw_dspp.o \
 	sde/sde_hw_intf.o \
@@ -132,7 +133,7 @@
 	sde/sde_hw_top.o \
 	sde/sde_hw_interrupts.o \
 	sde/sde_hw_vbif.o \
-	sde/sde_formats.o
+	sde/sde_formats.o \
 
 obj-$(CONFIG_DRM_MSM) += display-manager/display_manager.o
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index c05dabd..59e9899 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -128,7 +128,7 @@
 
 static int dsi_dipslay_debugfs_deinit(struct dsi_display *display)
 {
-	debugfs_remove(display->root);
+	debugfs_remove_recursive(display->root);
 
 	return 0;
 }
@@ -999,6 +999,7 @@
 	src->byte_clk = devm_clk_get(&display->pdev->dev, "src_byte_clk");
 	if (IS_ERR_OR_NULL(src->byte_clk)) {
 		rc = PTR_ERR(src->byte_clk);
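+		/* clear the ERR_PTR so later cleanup sees a NULL clock */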
+		src->byte_clk = NULL;
 		pr_err("failed to get src_byte_clk, rc=%d\n", rc);
 		goto error;
 	}
@@ -1006,6 +1007,7 @@
 	src->pixel_clk = devm_clk_get(&display->pdev->dev, "src_pixel_clk");
 	if (IS_ERR_OR_NULL(src->pixel_clk)) {
 		rc = PTR_ERR(src->pixel_clk);
+		src->pixel_clk = NULL;
 		pr_err("failed to get src_pixel_clk, rc=%d\n", rc);
 		goto error;
 	}
@@ -1014,6 +1016,7 @@
 	if (IS_ERR_OR_NULL(mux->byte_clk)) {
 		rc = PTR_ERR(mux->byte_clk);
 		pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+		mux->byte_clk = NULL;
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
 		 * non-critical failure since these clocks are requied only for
@@ -1026,6 +1029,7 @@
 	mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
 	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
 		rc = PTR_ERR(mux->pixel_clk);
+		mux->pixel_clk = NULL;
 		pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
@@ -1039,6 +1043,7 @@
 	shadow->byte_clk = devm_clk_get(&display->pdev->dev, "shadow_byte_clk");
 	if (IS_ERR_OR_NULL(shadow->byte_clk)) {
 		rc = PTR_ERR(shadow->byte_clk);
+		shadow->byte_clk = NULL;
 		pr_err("failed to get shadow_byte_clk, rc=%d\n", rc);
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
@@ -1053,6 +1058,7 @@
 					 "shadow_pixel_clk");
 	if (IS_ERR_OR_NULL(shadow->pixel_clk)) {
 		rc = PTR_ERR(shadow->pixel_clk);
+		shadow->pixel_clk = NULL;
 		pr_err("failed to get shadow_pixel_clk, rc=%d\n", rc);
 		/*
 		 * Skip getting rest of clocks since one failed. This is a
@@ -1888,6 +1894,8 @@
 			       display->name, i, rc);
 	}
 
+	(void)dsi_dipslay_debugfs_deinit(display);
+
 	mutex_unlock(&display->display_lock);
 	return rc;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c97d097..8e30f04 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -24,7 +24,6 @@
 #include "display_manager.h"
 #include "sde_wb.h"
 
-
 /*
  * MSM driver version:
  * - 1.0.0 - initial interface
@@ -35,6 +34,8 @@
 #define MSM_VERSION_MINOR	2
 #define MSM_VERSION_PATCHLEVEL	0
 
+#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -501,7 +502,16 @@
 	if (ret)
 		goto fail;
 
-	drm_kms_helper_poll_init(ddev);
+	/* perform subdriver post initialization */
+	if (kms && kms->funcs && kms->funcs->postinit) {
+		ret = kms->funcs->postinit(kms);
+		if (ret) {
+			dev_err(dev->dev, "kms post init failed: %d\n", ret);
+			goto fail;
+		}
+	}
+
+	drm_kms_helper_poll_init(dev);
 
 	return 0;
 
@@ -562,7 +572,20 @@
 static void msm_preclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->preclose)
+		kms->funcs->preclose(kms, file);
+}
+
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_kms *kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->postclose)
+		kms->funcs->postclose(kms, file);
 
 	mutex_lock(&dev->struct_mutex);
 	if (ctx == priv->lastctx)
@@ -572,11 +595,126 @@
 	kfree(ctx);
 }
 
+static int msm_disable_all_modes_commit(
+		struct drm_device *dev,
+		struct drm_atomic_state *state)
+{
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	unsigned int plane_mask;
+	int ret;
+
+	plane_mask = 0;
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto fail;
+		}
+
+		plane_state->rotation = BIT(DRM_ROTATE_0);
+
+		plane->old_fb = plane->fb;
+		plane_mask |= 1 << drm_plane_index(plane);
+
+		/* disable non-primary: */
+		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+			continue;
+
+		DRM_DEBUG("disabling plane %d\n", plane->base.id);
+
+		ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+		if (ret != 0)
+			DRM_ERROR("error %d disabling plane %d\n", ret,
+					plane->base.id);
+	}
+
+	drm_for_each_crtc(crtc, dev) {
+		struct drm_mode_set mode_set;
+
+		memset(&mode_set, 0, sizeof(struct drm_mode_set));
+		mode_set.crtc = crtc;
+
+		DRM_DEBUG("disabling crtc %d\n", crtc->base.id);
+
+		ret = __drm_atomic_helper_set_config(&mode_set, state);
+		if (ret != 0)
+			DRM_ERROR("error %d disabling crtc %d\n", ret,
+					crtc->base.id);
+	}
+
+	DRM_DEBUG("committing disables\n");
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
+	DRM_DEBUG("disables result %d\n", ret);
+	return ret;
+}
+
+/**
+ * msm_disable_all_modes - disables all planes and crtcs via an atomic commit
+ *	based on restore_fbdev_mode_atomic in drm_fb_helper.c
+ * @dev: device pointer
+ * @Return: 0 on success, otherwise -error
+ */
+static int msm_disable_all_modes(struct drm_device *dev)
+{
+	struct drm_atomic_state *state;
+	int ret, i;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = dev->mode_config.acquire_ctx;
+
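+	/* retry with backoff while the commit hits a modeset deadlock */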
+	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
+		ret = msm_disable_all_modes_commit(dev, state);
+		if (ret != -EDEADLK)
+			break;
+		drm_atomic_state_clear(state);
+		drm_atomic_legacy_backoff(state);
+	}
+
+	/* on successful atomic commit state ownership transfers to framework */
+	if (ret != 0)
+		drm_atomic_state_free(state);
+
+	return ret;
+}
+
 static void msm_lastclose(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	if (priv->fbdev)
+	struct msm_kms *kms = priv->kms;
+	int i;
+
+	/*
+	 * clean up vblank disable immediately as this is the last close.
+	 */
+	for (i = 0; i < dev->num_crtcs; i++) {
+		struct drm_vblank_crtc *vblank = &dev->vblank[i];
+		struct timer_list *disable_timer = &vblank->disable_timer;
+
+		if (del_timer_sync(disable_timer))
+			disable_timer->function(disable_timer->data);
+	}
+
+	/* wait for pending vblank requests to be executed by worker thread */
+	flush_workqueue(priv->wq);
+
+	if (priv->fbdev) {
 		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+	} else {
+		drm_modeset_lock_all(dev);
+		msm_disable_all_modes(dev);
+		drm_modeset_unlock_all(dev);
+		if (kms && kms->funcs && kms->funcs->lastclose)
+			kms->funcs->lastclose(kms);
+	}
 }
 
 static irqreturn_t msm_irq(int irq, void *arg)
@@ -836,6 +974,7 @@
 				DRIVER_MODESET,
 	.open               = msm_open,
 	.preclose           = msm_preclose,
+	.postclose          = msm_postclose,
 	.lastclose          = msm_lastclose,
 	.irq_handler        = msm_irq,
 	.irq_preinstall     = msm_irq_preinstall,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 59efc45..e5adb11 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -78,8 +78,8 @@
 
 enum msm_mdp_plane_property {
 	/* blob properties, always put these first */
-	PLANE_PROP_SCALER,
-	PLANE_PROP_CSC,
+	PLANE_PROP_SCALER_V1,
+	PLANE_PROP_CSC_V1,
 	PLANE_PROP_INFO,
 
 	/* # of blob properties */
@@ -89,6 +89,8 @@
 	PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
 	PLANE_PROP_ALPHA,
 	PLANE_PROP_COLOR_FILL,
+	PLANE_PROP_H_DECIMATE,
+	PLANE_PROP_V_DECIMATE,
 	PLANE_PROP_INPUT_FENCE,
 
 	/* enum/bitmask properties */
@@ -101,6 +103,8 @@
 };
 
 enum msm_mdp_crtc_property {
+	CRTC_PROP_INFO,
+
 	/* # of blob properties */
 	CRTC_PROP_BLOBCOUNT,
 
@@ -281,6 +285,9 @@
 	struct drm_property *crtc_property[CRTC_PROP_COUNT];
 	struct drm_property *conn_property[CONNECTOR_PROP_COUNT];
 
+	/* Color processing properties for the crtc */
+	struct drm_property **cp_property;
+
 	/* VRAM carveout, used when no IOMMU: */
 	struct {
 		unsigned long size;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 22e7441..5f97483 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -42,6 +42,7 @@
 struct msm_kms_funcs {
 	/* hw initialization: */
 	int (*hw_init)(struct msm_kms *kms);
+	int (*postinit)(struct msm_kms *kms);
 	/* irq handling: */
 	void (*irq_preinstall)(struct msm_kms *kms);
 	int (*irq_postinstall)(struct msm_kms *kms);
@@ -78,6 +79,9 @@
 			struct drm_encoder *slave_encoder,
 			bool is_cmd_mode);
 	void (*postopen)(struct msm_kms *kms, struct drm_file *file);
+	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+	void (*postclose)(struct msm_kms *kms, struct drm_file *file);
+	void (*lastclose)(struct msm_kms *kms);
 	/* cleanup: */
 	void (*destroy)(struct msm_kms *kms);
 };
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index 3edf4b9..663781f 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -134,9 +134,20 @@
 			&info->dirty_list);
 }
 
-void msm_property_install_range(struct msm_property_info *info,
+/**
+ * _msm_property_install_integer - install standard drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ * @force_dirty: Whether or not to filter 'dirty' status on unchanged values
+ */
+static void _msm_property_install_integer(struct msm_property_info *info,
 		const char *name, int flags, uint64_t min, uint64_t max,
-		uint64_t init, uint32_t property_idx)
+		uint64_t init, uint32_t property_idx, bool force_dirty)
 {
 	struct drm_property **prop;
 
@@ -162,6 +173,7 @@
 
 		/* save init value for later */
 		info->property_data[property_idx].default_value = init;
+		info->property_data[property_idx].force_dirty = force_dirty;
 
 		/* always attach property, if created */
 		if (*prop) {
@@ -171,6 +183,22 @@
 	}
 }
 
+void msm_property_install_range(struct msm_property_info *info,
+		const char *name, int flags, uint64_t min, uint64_t max,
+		uint64_t init, uint32_t property_idx)
+{
+	_msm_property_install_integer(info, name, flags,
+			min, max, init, property_idx, false);
+}
+
+void msm_property_install_volatile_range(struct msm_property_info *info,
+		const char *name, int flags, uint64_t min, uint64_t max,
+		uint64_t init, uint32_t property_idx)
+{
+	_msm_property_install_integer(info, name, flags,
+			min, max, init, property_idx, true);
+}
+
 void msm_property_install_rotation(struct msm_property_info *info,
 		unsigned int supported_rotations, uint32_t property_idx)
 {
@@ -198,6 +226,7 @@
 
 		/* save init value for later */
 		info->property_data[property_idx].default_value = 0;
+		info->property_data[property_idx].force_dirty = false;
 
 		/* always attach property, if created */
 		if (*prop) {
@@ -244,6 +273,7 @@
 
 		/* save init value for later */
 		info->property_data[property_idx].default_value = 0;
+		info->property_data[property_idx].force_dirty = false;
 
 		/* always attach property, if created */
 		if (*prop) {
@@ -281,6 +311,7 @@
 
 		/* save init value for later */
 		info->property_data[property_idx].default_value = 0;
+		info->property_data[property_idx].force_dirty = true;
 
 		/* always attach property, if created */
 		if (*prop) {
@@ -350,7 +381,7 @@
 
 	property_idx = msm_property_index(info, property);
 	if (!info || (property_idx == -EINVAL) || !property_values) {
-		DRM_ERROR("invalid argument(s)\n");
+		DRM_DEBUG("Invalid argument(s)\n");
 	} else {
 		/* extra handling for incoming properties */
 		mutex_lock(&info->property_lock);
@@ -376,11 +407,14 @@
 		}
 
 		/* update value and flag as dirty */
-		property_values[property_idx] = val;
-		_msm_property_set_dirty_no_lock(info, property_idx);
-		mutex_unlock(&info->property_lock);
+		if (property_values[property_idx] != val ||
+				info->property_data[property_idx].force_dirty) {
+			property_values[property_idx] = val;
+			_msm_property_set_dirty_no_lock(info, property_idx);
 
-		DBG("%s - %lld", property->name, val);
+			DBG("%s - %lld", property->name, val);
+		}
+		mutex_unlock(&info->property_lock);
 		rc = 0;
 	}
 
@@ -396,7 +430,7 @@
 
 	property_idx = msm_property_index(info, property);
 	if (!info || (property_idx == -EINVAL) || !property_values || !val) {
-		DRM_ERROR("invalid argument(s)\n");
+		DRM_DEBUG("Invalid argument(s)\n");
 	} else {
 		mutex_lock(&info->property_lock);
 		*val = property_values[property_idx];
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
index f065cbf..dbe28bd 100644
--- a/drivers/gpu/drm/msm/msm_prop.h
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -23,10 +23,13 @@
  *                            drm-object per property stuff
  * @default_value: Default property value for this drm object
  * @dirty_node: Linked list node to track if property is dirty or not
+ * @force_dirty: Always dirty property on incoming sets, rather than checking
+ *               for modified values
  */
 struct msm_property_data {
 	uint64_t default_value;
 	struct list_head dirty_node;
+	bool force_dirty;
 };
 
 /**
@@ -172,7 +175,7 @@
  * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
  * @min: Min property value
  * @max: Max property value
- * @init: Default property value
+ * @init: Default Property value
  * @property_idx: Property index
  */
 void msm_property_install_range(struct msm_property_info *info,
@@ -184,6 +187,27 @@
 		uint32_t property_idx);
 
 /**
+ * msm_property_install_volatile_range - install drm range property
+ *	This function is similar to msm_property_install_range, but assumes
+ *	that the property is meant for holding user pointers or descriptors
+ *	that may reference volatile data without having an updated value.
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_volatile_range(struct msm_property_info *info,
+		const char *name,
+		int flags,
+		uint64_t min,
+		uint64_t max,
+		uint64_t init,
+		uint32_t property_idx);
+
+/**
  * msm_property_install_rotation - install standard drm rotation property
  * @info: Pointer to property info container struct
  * @supported_rotations: Bitmask of supported rotation values (see
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
new file mode 100644
index 0000000..0227b59
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -0,0 +1,909 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <drm/msm_drm_pp.h>
+#include "sde_color_processing.h"
+#include "sde_kms.h"
+#include "sde_crtc.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_lm.h"
+
+struct sde_cp_node {
+	u32 property_id;
+	u32 prop_flags;
+	u32 feature;
+	void *blob_ptr;
+	uint64_t prop_val;
+	const struct sde_pp_blk *pp_blk;
+	struct list_head feature_list;
+	struct list_head active_list;
+	struct list_head dirty_list;
+	void (*dspp_feature_op)(struct sde_hw_dspp *ctx, void *cfg);
+	void (*lm_feature_op)(struct sde_hw_mixer *mixer, void *cfg);
+};
+
+struct sde_cp_prop_attach {
+	struct drm_crtc *crtc;
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node;
+	const struct sde_pp_blk *pp_blk;
+	u32 feature;
+	void *ops;
+	uint64_t val;
+};
+
+static void dspp_pcc_install_property(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp);
+
+static void dspp_hsic_install_property(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp);
+
+static void dspp_ad_install_property(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp);
+
+static void dspp_vlut_install_property(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp);
+
+typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp);
+
+static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
+
+#define setup_dspp_prop_install_funcs(func) \
+do { \
+	func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
+	func[SDE_DSPP_HSIC] = dspp_hsic_install_property; \
+	func[SDE_DSPP_AD] = dspp_ad_install_property; \
+	func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
+} while (0)
+
+typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc,
+					struct sde_hw_mixer *hw_mixer);
+
+static lm_prop_install_func_t lm_prop_install_func[SDE_MIXER_MAX];
+
+static void lm_gc_install_property(struct drm_crtc *crtc,
+				     struct sde_hw_mixer *hw_mixer);
+
+#define setup_lm_prop_install_funcs(func) \
+	(func[SDE_MIXER_GC] = lm_gc_install_property)
+
+enum {
+	/* Append new DSPP features before SDE_CP_CRTC_DSPP_MAX */
+	/* DSPP Features start */
+	SDE_CP_CRTC_DSPP_IGC,
+	SDE_CP_CRTC_DSPP_PCC,
+	SDE_CP_CRTC_DSPP_GC,
+	SDE_CP_CRTC_DSPP_HUE,
+	SDE_CP_CRTC_DSPP_SAT,
+	SDE_CP_CRTC_DSPP_VAL,
+	SDE_CP_CRTC_DSPP_CONT,
+	SDE_CP_CRTC_DSPP_MEMCOLOR,
+	SDE_CP_CRTC_DSPP_SIXZONE,
+	SDE_CP_CRTC_DSPP_GAMUT,
+	SDE_CP_CRTC_DSPP_DITHER,
+	SDE_CP_CRTC_DSPP_HIST,
+	SDE_CP_CRTC_DSPP_AD,
+	SDE_CP_CRTC_DSPP_VLUT,
+	SDE_CP_CRTC_DSPP_MAX,
+	/* DSPP features end */
+
+	/* Append new LM features before SDE_CP_CRTC_MAX_FEATURES */
+	/* LM feature start*/
+	SDE_CP_CRTC_LM_GC,
+	/* LM feature end*/
+
+	SDE_CP_CRTC_MAX_FEATURES,
+};
+
+#define INIT_PROP_ATTACH(p, crtc, prop, node, blk, feature, func, val) \
+	do { \
+		(p)->crtc = crtc; \
+		(p)->prop = prop; \
+		(p)->prop_node = node; \
+		(p)->pp_blk = blk; \
+		(p)->feature = feature; \
+		(p)->ops = func; \
+		(p)->val = val; \
+	} while (0)
+
+static void sde_cp_get_hw_payload(struct sde_cp_node *prop_node,
+				  struct sde_hw_cp_cfg *hw_cfg,
+				  bool *feature_enabled)
+{
+
+	struct drm_property_blob *blob = NULL;
+
+	memset(hw_cfg, 0, sizeof(*hw_cfg));
+	*feature_enabled = false;
+
+	blob = prop_node->blob_ptr;
+	if (prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+		if (blob) {
+			hw_cfg->len = blob->length;
+			hw_cfg->payload = blob->data;
+			*feature_enabled = true;
+		}
+	} else if (prop_node->prop_flags & DRM_MODE_PROP_RANGE) {
+		/* Check if local blob is Set */
+		if (!blob) {
+			hw_cfg->len = sizeof(prop_node->prop_val);
+			if (prop_node->prop_val)
+				hw_cfg->payload = &prop_node->prop_val;
+		} else {
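+			/* range value gates the local blob payload */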
+			hw_cfg->len = (prop_node->prop_val) ? blob->length :
+					0;
+			hw_cfg->payload = (prop_node->prop_val) ? blob->data
+						: NULL;
+		}
+		if (prop_node->prop_val)
+			*feature_enabled = true;
+	} else {
+		DRM_ERROR("property type is not supported\n");
+	}
+}
+
+static int sde_cp_disable_crtc_blob_property(struct sde_cp_node *prop_node)
+{
+	struct drm_property_blob *blob = prop_node->blob_ptr;
+
+	if (!blob)
+		return -EINVAL;
+	drm_property_unreference_blob(blob);
+	prop_node->blob_ptr = NULL;
+	return 0;
+}
+
+static int sde_cp_create_local_blob(struct drm_crtc *crtc, u32 feature, int len)
+{
+	int ret = -EINVAL;
+	bool found = false;
+	struct sde_cp_node *prop_node = NULL;
+	struct drm_property_blob *blob_ptr;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (prop_node->feature == feature) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found || prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+		DRM_ERROR("local blob create failed prop found %d flags %d\n",
+		       found, prop_node->prop_flags);
+		return ret;
+	}
+
+	blob_ptr = drm_property_create_blob(crtc->dev, len, NULL);
+	ret = (IS_ERR_OR_NULL(blob_ptr)) ? PTR_ERR(blob_ptr) : 0;
+	if (!ret)
+		prop_node->blob_ptr = blob_ptr;
+
+	return ret;
+}
+
+static void sde_cp_destroy_local_blob(struct sde_cp_node *prop_node)
+{
+	if (!(prop_node->prop_flags & DRM_MODE_PROP_BLOB) &&
+		prop_node->blob_ptr)
+		drm_property_unreference_blob(prop_node->blob_ptr);
+}
+
+static int sde_cp_handle_range_property(struct sde_cp_node *prop_node,
+					uint64_t val)
+{
+	int ret = 0;
+	struct drm_property_blob *blob_ptr = prop_node->blob_ptr;
+
+	if (!blob_ptr) {
+		prop_node->prop_val = val;
+		return 0;
+	}
+
+	if (!val) {
+		prop_node->prop_val = 0;
+		return 0;
+	}
+
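+	/* a non-zero value is a user pointer to the blob payload */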
+	ret = copy_from_user(blob_ptr->data, (void *)val, blob_ptr->length);
+	if (ret) {
+		DRM_ERROR("failed to get the property info ret %d", ret);
+		ret = -EFAULT;
+	} else {
+		prop_node->prop_val = val;
+	}
+
+	return ret;
+}
+
+static int sde_cp_disable_crtc_property(struct drm_crtc *crtc,
+					 struct drm_property *property,
+					 struct sde_cp_node *prop_node)
+{
+	int ret = -EINVAL;
+
+	if (property->flags & DRM_MODE_PROP_BLOB)
+		ret = sde_cp_disable_crtc_blob_property(prop_node);
+	else if (property->flags & DRM_MODE_PROP_RANGE)
+		ret = sde_cp_handle_range_property(prop_node, 0);
+	return ret;
+}
+
+static int sde_cp_enable_crtc_blob_property(struct drm_crtc *crtc,
+					       struct sde_cp_node *prop_node,
+					       uint64_t val)
+{
+	struct drm_property_blob *blob = NULL;
+
+	/**
+	 * For non-blob based properties add support to create a blob
+	 * using the val and store the blob_ptr in prop_node.
+	 */
+	blob = drm_property_lookup_blob(crtc->dev, val);
+	if (!blob) {
+		DRM_ERROR("invalid blob id %lld\n", val);
+		return -EINVAL;
+	}
+	/* Release reference to existing payload of the property */
+	if (prop_node->blob_ptr)
+		drm_property_unreference_blob(prop_node->blob_ptr);
+
+	prop_node->blob_ptr = blob;
+	return 0;
+}
+
+static int sde_cp_enable_crtc_property(struct drm_crtc *crtc,
+				       struct drm_property *property,
+				       struct sde_cp_node *prop_node,
+				       uint64_t val)
+{
+	int ret = -EINVAL;
+
+	if (property->flags & DRM_MODE_PROP_BLOB)
+		ret = sde_cp_enable_crtc_blob_property(crtc, prop_node, val);
+	else if (property->flags & DRM_MODE_PROP_RANGE)
+		ret = sde_cp_handle_range_property(prop_node, val);
+	return ret;
+}
+
+static int sde_cp_crtc_get_mixer_idx(struct sde_crtc *sde_crtc)
+{
+	if (sde_crtc->num_mixers)
+		return sde_crtc->mixers[0].hw_lm->idx;
+	else
+		return -EINVAL;
+}
+
+static struct sde_kms *get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv = crtc->dev->dev_private;
+
+	return to_sde_kms(priv->kms);
+}
+
+static void sde_cp_crtc_prop_attach(struct sde_cp_prop_attach *prop_attach)
+{
+
+	struct sde_crtc *sde_crtc = to_sde_crtc(prop_attach->crtc);
+
+	drm_object_attach_property(&prop_attach->crtc->base,
+				   prop_attach->prop, prop_attach->val);
+
+	INIT_LIST_HEAD(&prop_attach->prop_node->active_list);
+	INIT_LIST_HEAD(&prop_attach->prop_node->dirty_list);
+
+	prop_attach->prop_node->property_id = prop_attach->prop->base.id;
+	prop_attach->prop_node->prop_flags = prop_attach->prop->flags;
+	prop_attach->prop_node->feature = prop_attach->feature;
+	prop_attach->prop_node->pp_blk = prop_attach->pp_blk;
+
+	if (prop_attach->feature < SDE_CP_CRTC_DSPP_MAX)
+		prop_attach->prop_node->dspp_feature_op = prop_attach->ops;
+	else
+		prop_attach->prop_node->lm_feature_op = prop_attach->ops;
+
+	list_add(&prop_attach->prop_node->feature_list,
+		 &sde_crtc->feature_list);
+}
+
+void sde_cp_crtc_init(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("invalid crtc %pK\n", crtc);
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	INIT_LIST_HEAD(&sde_crtc->active_list);
+	INIT_LIST_HEAD(&sde_crtc->dirty_list);
+	INIT_LIST_HEAD(&sde_crtc->feature_list);
+}
+
+static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
+						   char *name,
+						   u32 feature)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	struct sde_cp_prop_attach prop_attach;
+	uint64_t val = 0;
+
+	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create(crtc->dev, DRM_MODE_PROP_IMMUTABLE,
+					   name, 0);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node, NULL,
+				feature, NULL, val);
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+static void sde_cp_crtc_install_range_property(struct drm_crtc *crtc,
+					     char *name,
+					     const struct sde_pp_blk *pp_blk,
+					     u32 feature, void *ops,
+					     uint64_t min, uint64_t max,
+					     uint64_t val)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+			  SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create_range(crtc->dev, 0, name, min, max);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node, pp_blk,
+				feature, ops, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
+					     const struct sde_pp_blk *pp_blk,
+					     u32 feature, void *ops)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	uint64_t val = 0;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create(crtc->dev,
+					   DRM_MODE_PROP_BLOB, name, 0);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node, pp_blk,
+				feature, ops, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+
+static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
+				   struct sde_crtc *sde_crtc)
+{
+	struct sde_hw_cp_cfg hw_cfg;
+	u32 num_mixers = sde_crtc->num_mixers;
+	int i = 0;
+	bool is_dspp = true;
+	bool feature_enabled = false;
+
+	if (!prop_node->dspp_feature_op && !prop_node->lm_feature_op) {
+		DRM_ERROR("ops not set for dspp/lm\n");
+		return;
+	}
+
+	is_dspp = !prop_node->lm_feature_op;
+	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
+
+	for (i = 0; i < num_mixers; i++) {
+		if (is_dspp) {
+			if (!sde_crtc->mixers[i].hw_dspp)
+				continue;
+			prop_node->dspp_feature_op(sde_crtc->mixers[i].hw_dspp,
+						   &hw_cfg);
+		} else {
+			if (!sde_crtc->mixers[i].hw_lm)
+				continue;
+			prop_node->lm_feature_op(sde_crtc->mixers[i].hw_lm,
+						 &hw_cfg);
+		}
+	}
+
+	if (feature_enabled) {
+		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
+				 prop_node->property_id);
+		list_add_tail(&prop_node->active_list, &sde_crtc->active_list);
+	} else {
+		DRM_DEBUG_DRIVER("remove feature from active list %d\n",
+			 prop_node->property_id);
+		list_del_init(&prop_node->active_list);
+	}
+	/* Programming of feature done remove from dirty list */
+	list_del_init(&prop_node->dirty_list);
+}
+
+void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	bool set_dspp_flush = false, set_lm_flush = false;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+	struct sde_hw_ctl *ctl;
+	uint32_t flush_mask = 0;
+	u32 num_mixers = 0, i = 0;
+
+	if (!crtc || !crtc->dev) {
+		DRM_ERROR("invalid crtc %pK dev %pK\n", crtc,
+			  (crtc ? crtc->dev : NULL));
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	num_mixers = sde_crtc->num_mixers;
+	if (!num_mixers) {
+		DRM_DEBUG_DRIVER("no mixers for this crtc\n");
+		return;
+	}
+
+	/* Check if dirty list is empty for early return */
+	if (list_empty(&sde_crtc->dirty_list)) {
+		DRM_DEBUG_DRIVER("Dirty list is empty\n");
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
+							dirty_list) {
+		sde_cp_crtc_setfeature(prop_node, sde_crtc);
+		/* Set the flush flag to true */
+		if (prop_node->dspp_feature_op)
+			set_dspp_flush = true;
+		else
+			set_lm_flush = true;
+	}
+
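+	/* set the dspp/lm flush bits on each mixer's control path */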
+	for (i = 0; i < num_mixers; i++) {
+		ctl = sde_crtc->mixers[i].hw_ctl;
+		if (!ctl)
+			continue;
+		if (set_dspp_flush && ctl->ops.get_bitmask_dspp
+				&& sde_crtc->mixers[i].hw_dspp) {
+			ctl->ops.get_bitmask_dspp(ctl,
+					&flush_mask,
+					sde_crtc->mixers[i].hw_dspp->idx);
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+		}
+		if (set_lm_flush && ctl->ops.get_bitmask_mixer
+				&& sde_crtc->mixers[i].hw_lm) {
+			flush_mask = ctl->ops.get_bitmask_mixer(ctl,
+					sde_crtc->mixers[i].hw_lm->idx);
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+		}
+	}
+}
+
+void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
+{
+	struct sde_kms *kms = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	unsigned long features = 0;
+	int idx = 0, i = 0;
+	struct msm_drm_private *priv;
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_hw_mixer *hw_mixer = NULL;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DRM_ERROR("invalid crtc %pK dev %pK\n",
+		       crtc, ((crtc) ? crtc->dev : NULL));
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	kms = get_kms(crtc);
+	if (!kms || !kms->catalog || !sde_crtc) {
+		DRM_ERROR("invalid sde kms %pK catalog %pK sde_crtc %pK\n",
+		 kms, ((kms) ? kms->catalog : NULL), sde_crtc);
+		return;
+	}
+
+	/**
+	 * Function can be called during the atomic_check with test_only flag
+	 * and actual commit. Allocate properties only if feature list is
+	 * empty during the atomic_check with test_only flag.
+	 */
+	if (!list_empty(&sde_crtc->feature_list))
+		return;
+
+	catalog = kms->catalog;
+	idx = sde_cp_crtc_get_mixer_idx(sde_crtc);
+	if (idx < 0 || idx >= catalog->mixer_count) {
+		DRM_ERROR("invalid idx %d\n", idx);
+		return;
+	}
+
+	priv = crtc->dev->dev_private;
+	/**
+	 * DSPP/LM properties are global to all the CRTCS.
+	 * Properties are created for first CRTC and re-used for later
+	 * crtcs.
+	 */
+	if (!priv->cp_property) {
+		priv->cp_property = kzalloc((sizeof(priv->cp_property) *
+				SDE_CP_CRTC_MAX_FEATURES), GFP_KERNEL);
+		setup_dspp_prop_install_funcs(dspp_prop_install_func);
+		setup_lm_prop_install_funcs(lm_prop_install_func);
+	}
+	if (!priv->cp_property)
+		return;
+
+	if (idx >= catalog->dspp_count)
+		goto lm_property;
+
+	/* Check for all the DSPP properties and attach it to CRTC */
+	hw_dspp = sde_crtc->mixers[0].hw_dspp;
+	features = (hw_dspp) ? hw_dspp->cap->features : 0;
+
+	if (!hw_dspp || !hw_dspp->cap->sblk || !features)
+		goto lm_property;
+
+	for (i = 0; i < SDE_DSPP_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		if (dspp_prop_install_func[i])
+			dspp_prop_install_func[i](crtc, hw_dspp);
+	}
+
+lm_property:
+	/* Check for all the LM properties and attach it to CRTC */
+	hw_mixer = sde_crtc->mixers[0].hw_lm;
+	features = (hw_mixer) ? hw_mixer->cap->features : 0;
+
+	if (!hw_mixer || !hw_mixer->cap->sblk || !features)
+		return;
+
+	for (i = 0; i < SDE_MIXER_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		if (lm_prop_install_func[i])
+			lm_prop_install_func[i](crtc, hw_mixer);
+	}
+}
+
+int sde_cp_crtc_set_property(struct drm_crtc *crtc,
+				struct drm_property *property,
+				uint64_t val)
+{
+	struct sde_cp_node *prop_node = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	int ret = 0;
+	u8 found = 0;
+
+	if (!crtc || !property) {
+		DRM_ERROR("invalid crtc %pK property %pK\n", crtc, property);
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (property->base.id == prop_node->property_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return 0;
+
+	/* remove the property from dirty list */
+	list_del_init(&prop_node->dirty_list);
+
+	if (!val)
+		ret = sde_cp_disable_crtc_property(crtc, property, prop_node);
+	else
+		ret = sde_cp_enable_crtc_property(crtc, property,
+						  prop_node, val);
+
+	if (!ret) {
+		/* remove the property from active list */
+		list_del_init(&prop_node->active_list);
+		/* Mark the feature as dirty */
+		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+	}
+	return ret;
+}
+
+int sde_cp_crtc_get_property(struct drm_crtc *crtc,
+			     struct drm_property *property, uint64_t *val)
+{
+	struct sde_cp_node *prop_node = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+
+	if (!crtc || !property || !val) {
+		DRM_ERROR("invalid crtc %pK property %pK val %pK\n",
+			  crtc, property, val);
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return -EINVAL;
+	}
+	/* Return 0 if property is not supported */
+	*val = 0;
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (property->base.id == prop_node->property_id) {
+			*val = prop_node->prop_val;
+			break;
+		}
+	}
+	return 0;
+}
+
+void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("invalid crtc %pK\n", crtc);
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->feature_list,
+				 feature_list) {
+		if (prop_node->prop_flags & DRM_MODE_PROP_BLOB
+		    && prop_node->blob_ptr)
+			drm_property_unreference_blob(prop_node->blob_ptr);
+
+		list_del_init(&prop_node->active_list);
+		list_del_init(&prop_node->dirty_list);
+		list_del_init(&prop_node->feature_list);
+		sde_cp_destroy_local_blob(prop_node);
+		kfree(prop_node);
+	}
+
+	INIT_LIST_HEAD(&sde_crtc->active_list);
+	INIT_LIST_HEAD(&sde_crtc->dirty_list);
+	INIT_LIST_HEAD(&sde_crtc->feature_list);
+}
+
+void sde_cp_crtc_suspend(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("crtc %pK\n", crtc);
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
+				 active_list) {
+		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		list_del_init(&prop_node->active_list);
+	}
+}
+
+void sde_cp_crtc_resume(struct drm_crtc *crtc)
+{
+	/* placeholder for operations needed during resume */
+}
+
+static void dspp_pcc_install_property(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp)
+{
+	char feature_name[256];
+	u32 version;
+
+	version = hw_dspp->cap->sblk->pcc.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_PCC_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_create_blob_property(crtc, feature_name,
+					&hw_dspp->cap->sblk->pcc,
+					SDE_CP_CRTC_DSPP_PCC,
+					hw_dspp->ops.setup_pcc);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+static void dspp_hsic_install_property(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp)
+{
+	char feature_name[256];
+	u32 version;
+
+	version = hw_dspp->cap->sblk->hsic.version >> 16;
+	switch (version) {
+	case 1:
+		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+			"SDE_DSPP_HUE_V", version);
+		sde_cp_crtc_install_range_property(crtc, feature_name,
+			&hw_dspp->cap->sblk->hsic,
+			SDE_CP_CRTC_DSPP_HUE, hw_dspp->ops.setup_hue,
+			0, U32_MAX, 0);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+static void dspp_vlut_install_property(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp)
+{
+	char feature_name[256];
+	u32 version;
+
+	version = hw_dspp->cap->sblk->vlut.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_VLUT_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_install_range_property(crtc, feature_name,
+			&hw_dspp->cap->sblk->vlut,
+			SDE_CP_CRTC_DSPP_VLUT, hw_dspp->ops.setup_vlut,
+			0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc,
+			SDE_CP_CRTC_DSPP_VLUT,
+			sizeof(struct drm_msm_pa_vlut));
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+static void dspp_ad_install_property(struct drm_crtc *crtc,
+					struct sde_hw_dspp *hw_dspp)
+{
+	char feature_name[256];
+	u32 version;
+
+	version = hw_dspp->cap->sblk->ad.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_AD_V", version);
+	switch (version) {
+	case 3:
+		sde_cp_crtc_install_immutable_property(crtc,
+			feature_name, SDE_CP_CRTC_DSPP_AD);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+static void lm_gc_install_property(struct drm_crtc *crtc,
+				     struct sde_hw_mixer *hw_mixer)
+{
+	char feature_name[256];
+	u32 version;
+
+	version = hw_mixer->cap->sblk->gc.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		 "SDE_LM_GC_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_create_blob_property(crtc, feature_name,
+			&hw_mixer->cap->sblk->gc,
+			SDE_CP_CRTC_LM_GC,
+			hw_mixer->ops.setup_gc);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
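A minimal userspace sketch (illustration only, not part of this patch) of how a client could program one of the range properties installed above, e.g. "SDE_DSPP_HUE_V1". It relies only on standard libdrm calls; the DRM fd, crtc id and hue value passed in are placeholders.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* look up a CRTC property by the name installed by the driver and set it */
static int set_crtc_prop_by_name(int fd, uint32_t crtc_id,
				 const char *name, uint64_t value)
{
	drmModeObjectPropertiesPtr props;
	uint32_t i, prop_id = 0;

	props = drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!props)
		return -1;

	for (i = 0; i < props->count_props && !prop_id; i++) {
		drmModePropertyPtr p = drmModeGetProperty(fd, props->props[i]);

		if (p && !strcmp(p->name, name))
			prop_id = p->prop_id;
		drmModeFreeProperty(p);
	}
	drmModeFreeObjectProperties(props);

	if (!prop_id)
		return -1;	/* property not exposed on this target */

	return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
					prop_id, value);
}

/* e.g.: set_crtc_prop_by_name(fd, crtc_id, "SDE_DSPP_HUE_V1", hue_value); */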
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
new file mode 100644
index 0000000..dbe52a2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -0,0 +1,83 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_COLOR_PROCESSING_H
+#define _SDE_COLOR_PROCESSING_H
+#include <drm/drm_crtc.h>
+
+/**
+ * sde_cp_crtc_init(): Initialize color processing lists for a crtc.
+ *                     Should be called during crtc initialization.
+ * @crtc:  Pointer to drm crtc.
+ */
+void sde_cp_crtc_init(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_install_properties(): Installs the color processing
+ *                                properties for a crtc.
+ *                                Should be called during crtc initialization.
+ * @crtc:  Pointer to crtc.
+ */
+void sde_cp_crtc_install_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_destroy_properties(): Destroys color processing
+ *                                   properties for a crtc.
+ *                                   Should be called during crtc de-initialization.
+ * @crtc:  Pointer to crtc.
+ */
+void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_set_property(): Set a color processing property
+ *                             for a crtc.
+ *                             Should be called during the atomic set property call.
+ * @crtc: Pointer to crtc.
+ * @property: Property that needs to be enabled/disabled.
+ * @val: Value of property.
+ */
+int sde_cp_crtc_set_property(struct drm_crtc *crtc,
+				struct drm_property *property, uint64_t val);
+
+/**
+ * sde_cp_crtc_apply_properties(): Enable/disable properties
+ *                                 for a crtc.
+ *                                 Should be called during the atomic commit call.
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_apply_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_get_property(): Get the value of a color processing property
+ *                             for a crtc.
+ *                             Should be called during the atomic get property call.
+ * @crtc: Pointer to crtc.
+ * @property: Property whose value is requested.
+ * @val: Pointer filled with the current value of the property.
+ *
+ */
+int sde_cp_crtc_get_property(struct drm_crtc *crtc,
+				struct drm_property *property, uint64_t *val);
+
+/**
+ * sde_cp_crtc_suspend(): Suspend the crtc color processing features
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_suspend(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_resume(): Resume the crtc color processing features
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_resume(struct drm_crtc *crtc);
+#endif /*_SDE_COLOR_PROCESSING_H */
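A condensed sketch (illustration only) of the call flow these entry points expect, mirroring how sde_crtc.c wires them later in this patch; the wrapper function below is hypothetical and exists purely to show the ordering.

#include <drm/drm_crtc.h>
#include "sde_color_processing.h"

/* hypothetical helper showing where each entry point is expected to be called */
static void example_crtc_cp_flow(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	uint64_t cur = 0;

	sde_cp_crtc_init(crtc);			/* from sde_crtc_init() */
	sde_cp_crtc_install_properties(crtc);	/* once dspp/lm caps are known */

	/* atomic set/get property handlers fall back to the cp helpers */
	sde_cp_crtc_set_property(crtc, property, val);
	sde_cp_crtc_get_property(crtc, property, &cur);

	sde_cp_crtc_apply_properties(crtc);	/* from crtc atomic_flush */

	sde_cp_crtc_suspend(crtc);		/* from crtc .save */
	sde_cp_crtc_resume(crtc);		/* from crtc .restore */

	sde_cp_crtc_destroy_properties(crtc);	/* from sde_crtc_destroy() */
}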
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 403160c..cca934d 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -239,6 +239,20 @@
 	((S) ? to_sde_connector_state((S))->out_fb : 0)
 
 /**
+ * sde_connector_get_topology_name - helper accessor to retrieve topology_name
+ * @connector: pointer to drm connector
+ * Returns: value of the CONNECTOR_PROP_TOPOLOGY_NAME property or 0
+ */
+static inline uint64_t sde_connector_get_topology_name(
+		struct drm_connector *connector)
+{
+	if (!connector || !connector->state)
+		return 0;
+	return sde_connector_get_property(connector->state,
+			CONNECTOR_PROP_TOPOLOGY_NAME);
+}
+
+/**
  * sde_connector_init - create drm connector object for a given display
  * @dev: Pointer to drm device struct
  * @encoder: Pointer to associated encoder
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 6e33101..3aa4c65 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -42,11 +42,6 @@
 			irq_idx);
 }
 
-static void sde_core_irq_intf_error_handler(void *arg, int irq_idx)
-{
-	SDE_ERROR("INTF underrun detected, irq_idx=%d\n", irq_idx);
-}
-
 int sde_core_irq_idx_lookup(struct sde_kms *sde_kms,
 		enum sde_intr_type intr_type, u32 instance_idx)
 {
@@ -191,36 +186,6 @@
 
 int sde_core_irq_postinstall(struct sde_kms *sde_kms)
 {
-	struct msm_drm_private *priv;
-	struct sde_irq_callback irq_cb;
-	int irq_idx;
-	int i;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid sde_kms\n");
-		return -EINVAL;
-	} else if (!sde_kms->dev) {
-		SDE_ERROR("invalid drm device\n");
-		return -EINVAL;
-	} else if (!sde_kms->dev->dev_private) {
-		SDE_ERROR("invalid device private\n");
-		return -EINVAL;
-	}
-	priv = sde_kms->dev->dev_private;
-
-	irq_cb.func = sde_core_irq_intf_error_handler;
-	irq_cb.arg  = sde_kms;
-
-	/* Register interface underrun callback */
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
-	for (i = 0; i < sde_kms->catalog->intf_count; i++) {
-		irq_idx = sde_core_irq_idx_lookup(sde_kms,
-				SDE_IRQ_TYPE_INTF_UNDER_RUN, i+INTF_0);
-		sde_core_irq_register_callback(sde_kms, irq_idx, &irq_cb);
-		sde_core_irq_enable(sde_kms, &irq_idx, 1);
-	}
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 5615885..b38c2df 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,14 +24,7 @@
 #include "sde_hw_lm.h"
 #include "sde_hw_ctl.h"
 #include "sde_crtc.h"
-
-#define CTL(i)       (CTL_0 + (i))
-#define LM(i)        (LM_0  + (i))
-#define INTF(i)      (INTF_0 + (i))
-
-/* uncomment to enable higher level IRQ msg's */
-/*#define DBG_IRQ      DBG*/
-#define DBG_IRQ(fmt, ...)
+#include "sde_color_processing.h"
 
 /* default input fence timeout, in ms */
 #define SDE_CRTC_INPUT_FENCE_TIMEOUT    2000
@@ -47,7 +40,7 @@
 #define LEFT_MIXER 0
 #define RIGHT_MIXER 1
 
-static struct sde_kms *get_kms(struct drm_crtc *crtc)
+static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv = crtc->dev->dev_private;
 
@@ -58,13 +51,18 @@
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 
-	DBG("");
+	SDE_DEBUG("\n");
 
 	if (!crtc)
 		return;
 
+	if (sde_crtc->blob_info)
+		drm_property_unreference_blob(sde_crtc->blob_info);
 	msm_property_destroy(&sde_crtc->property_info);
+	sde_cp_crtc_destroy_properties(crtc);
+
 	debugfs_remove_recursive(sde_crtc->debugfs_root);
+	mutex_destroy(&sde_crtc->crtc_lock);
 	sde_fence_deinit(&sde_crtc->output_fence);
 
 	drm_crtc_cleanup(crtc);
@@ -75,24 +73,17 @@
 		const struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
 {
-	DBG("");
+	SDE_DEBUG("\n");
 
-	if (msm_is_mode_seamless(adjusted_mode)) {
-		SDE_DEBUG("seamless mode set requested\n");
-		if (!crtc->enabled || crtc->state->active_changed) {
-			SDE_ERROR("crtc state prevents seamless transition\n");
-			return false;
-		}
+	if (msm_is_mode_seamless(adjusted_mode) &&
+		(!crtc->enabled || crtc->state->active_changed)) {
+		SDE_ERROR("crtc state prevents seamless transition\n");
+		return false;
 	}
 
 	return true;
 }
 
-static void sde_crtc_mode_set_nofb(struct drm_crtc *crtc)
-{
-	DBG("");
-}
-
 static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
 	struct sde_plane_state *pstate, struct sde_format *format)
 {
@@ -295,7 +286,7 @@
 		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
 
 		SDE_DEBUG("lm %d ctl %d add mask 0x%x to pending flush\n",
-			mixer->hw_lm->idx, ctl->idx, mixer[i].flush_mask);
+			mixer[i].hw_lm->idx, ctl->idx, mixer[i].flush_mask);
 
 		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
 			&sde_crtc->stage_cfg, i);
@@ -319,7 +310,8 @@
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
-static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+static void _sde_crtc_complete_flip(struct drm_crtc *crtc,
+		struct drm_file *file)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -335,10 +327,10 @@
 		 */
 		if (!file || (event->base.file_priv == file)) {
 			sde_crtc->event = NULL;
-			SDE_DEBUG("%s: send event: %pK\n",
+			DRM_DEBUG_VBL("%s: send event: %pK\n",
 						sde_crtc->name, event);
-			drm_send_vblank_event(dev, sde_crtc->drm_crtc_id,
-					event);
+			MSM_EVT(crtc->dev, crtc->base.id, 0);
+			drm_crtc_send_vblank_event(crtc, event);
 		}
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -347,24 +339,10 @@
 static void sde_crtc_vblank_cb(void *data)
 {
 	struct drm_crtc *crtc = (struct drm_crtc *)data;
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_kms *sde_kms = get_kms(crtc);
-	struct drm_device *dev = sde_kms->dev;
-	unsigned int pending;
 
-	pending = atomic_xchg(&sde_crtc->pending, 0);
-
-	if (pending & PENDING_FLIP) {
-		complete_flip(crtc, NULL);
-		/* free ref count paired with the atomic_flush */
-		drm_crtc_vblank_put(crtc);
-	}
-
-	if (atomic_read(&sde_crtc->drm_requested_vblank)) {
-		drm_handle_vblank(dev, sde_crtc->drm_crtc_id);
-		DBG_IRQ("");
-		MSM_EVT(crtc->dev, crtc->base.id, 0);
-	}
+	drm_crtc_handle_vblank(crtc);
+	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
+	MSM_EVT(crtc->dev, crtc->base.id, 0);
 }
 
 void sde_crtc_complete_commit(struct drm_crtc *crtc)
@@ -379,54 +357,6 @@
 }
 
 /**
- * _sde_crtc_trigger_kickoff - Iterate through the control paths and trigger
- *	the hw_ctl object to flush any pending flush mask, and trigger
- *	control start if the interface types require it.
- *
- *	This is currently designed to be called only once per crtc, per flush.
- *	It should be called from the encoder, through the
- *	sde_encoder_schedule_kickoff callflow, after all the encoders are ready
- *	to have CTL_START triggered.
- *
- *	It is called from the commit thread context.
- * @data: crtc pointer
- */
-static void _sde_crtc_trigger_kickoff(void *data)
-{
-	struct drm_crtc *crtc = (struct drm_crtc *)data;
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_crtc_mixer *mixer;
-	struct sde_hw_ctl *ctl;
-	int i;
-
-	if (!data) {
-		SDE_ERROR("invalid argument\n");
-		return;
-	}
-
-	MSM_EVT(crtc->dev, crtc->base.id, 0);
-
-	/* Commit all pending flush masks to hardware */
-	for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
-		ctl = sde_crtc->mixers[i].hw_ctl;
-		if (ctl) {
-			ctl->ops.trigger_flush(ctl);
-			MSM_EVT(crtc->dev, crtc->base.id, ctl->idx);
-		}
-	}
-
-	/* Signal start to any interface types that require it */
-	for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
-		mixer = &sde_crtc->mixers[i];
-		ctl = mixer->hw_ctl;
-		if (ctl && sde_encoder_needs_ctl_start(mixer->encoder)) {
-			ctl->ops.trigger_start(ctl);
-			MSM_EVT(crtc->dev, crtc->base.id, ctl->idx);
-		}
-	}
-}
-
-/**
  * _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
  * @cstate: Pointer to sde crtc state
  */
@@ -449,10 +379,9 @@
 {
 	struct drm_plane *plane = NULL;
 	uint32_t wait_ms = 1;
-	u64 ktime_end;
-	s64 ktime_wait; /* need signed 64-bit type */
+	ktime_t kt_end, kt_wait;
 
-	DBG("");
+	SDE_DEBUG("\n");
 
 	if (!crtc || !crtc->state) {
 		SDE_ERROR("invalid crtc/state %pK\n", crtc);
@@ -460,8 +389,8 @@
 	}
 
 	/* use monotonic timer to limit total fence wait time */
-	ktime_end = ktime_get_ns() +
-		to_sde_crtc_state(crtc->state)->input_fence_timeout_ns;
+	kt_end = ktime_add_ns(ktime_get(),
+		to_sde_crtc_state(crtc->state)->input_fence_timeout_ns);
 
 	/*
 	 * Wait for fences sequentially, as all of them need to be signalled
@@ -475,9 +404,9 @@
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		if (wait_ms) {
 			/* determine updated wait time */
-			ktime_wait = ktime_end - ktime_get_ns();
-			if (ktime_wait >= 0)
-				wait_ms = ktime_wait / NSEC_PER_MSEC;
+			kt_wait = ktime_sub(kt_end, ktime_get());
+			if (ktime_compare(kt_wait, ktime_set(0, 0)) >= 0)
+				wait_ms = ktime_to_ms(kt_wait);
 			else
 				wait_ms = 0;
 		}
@@ -490,15 +419,16 @@
 		struct drm_encoder *enc)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_kms *sde_kms = get_kms(crtc);
+	struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
 	struct sde_rm *rm = &sde_kms->rm;
 	struct sde_crtc_mixer *mixer;
 	struct sde_hw_ctl *last_valid_ctl = NULL;
 	int i;
-	struct sde_rm_hw_iter lm_iter, ctl_iter;
+	struct sde_rm_hw_iter lm_iter, ctl_iter, dspp_iter;
 
 	sde_rm_init_hw_iter(&lm_iter, enc->base.id, SDE_HW_BLK_LM);
 	sde_rm_init_hw_iter(&ctl_iter, enc->base.id, SDE_HW_BLK_CTL);
+	sde_rm_init_hw_iter(&dspp_iter, enc->base.id, SDE_HW_BLK_DSPP);
 
 	/* Set up all the mixers and ctls reserved by this encoder */
 	for (i = sde_crtc->num_mixers; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
@@ -525,6 +455,10 @@
 			return;
 		}
 
+		/* Dspp may be null */
+		(void) sde_rm_get_hw(rm, &dspp_iter);
+		mixer->hw_dspp = (struct sde_hw_dspp *)dspp_iter.hw;
+
 		mixer->encoder = enc;
 
 		sde_crtc->num_mixers++;
@@ -543,6 +477,7 @@
 	sde_crtc->num_mixers = 0;
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 
+	mutex_lock(&sde_crtc->crtc_lock);
 	/* Check for mixers on all encoders attached to this crtc */
 	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
 		if (enc->crtc != crtc)
@@ -550,6 +485,7 @@
 
 		_sde_crtc_setup_mixer_for_encoder(crtc, enc);
 	}
+	mutex_unlock(&sde_crtc->crtc_lock);
 }
 
 static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -560,7 +496,7 @@
 	unsigned long flags;
 	u32 i;
 
-	DBG("");
+	SDE_DEBUG("\n");
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -598,6 +534,7 @@
 		return;
 
 	_sde_crtc_blend_setup(crtc);
+	sde_cp_crtc_apply_properties(crtc);
 
 	/*
 	 * PP_DONE irq is only used by command mode for now.
@@ -608,17 +545,6 @@
 	 */
 }
 
-static void request_pending(struct drm_crtc *crtc, u32 pending)
-{
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-
-	atomic_or(pending, &sde_crtc->pending);
-
-	/* ref count the vblank event and interrupts over the atomic commit */
-	if (drm_crtc_vblank_get(crtc))
-		return;
-}
-
 static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_crtc_state)
 {
@@ -632,7 +558,7 @@
 		return;
 	}
 
-	DBG("");
+	SDE_DEBUG("\n");
 
 	sde_crtc = to_sde_crtc(crtc);
 
@@ -665,8 +591,6 @@
 	drm_atomic_crtc_for_each_plane(plane, crtc)
 		sde_plane_flush(plane);
 
-	request_pending(crtc, PENDING_FLIP);
-
 	/* Kickoff will be scheduled by outer layer */
 }
 
@@ -689,7 +613,7 @@
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(state);
 
-	DBG("");
+	SDE_DEBUG("\n");
 
 	__drm_atomic_helper_crtc_destroy_state(crtc, state);
 
@@ -717,8 +641,7 @@
 		 * Encoder will flush/start now, unless it has a tx pending.
 		 * If so, it may delay and flush at an irq event (e.g. ppdone)
 		 */
-		sde_encoder_schedule_kickoff(encoder, _sde_crtc_trigger_kickoff,
-				crtc);
+		sde_encoder_schedule_kickoff(encoder);
 	}
 }
 
@@ -800,15 +723,17 @@
 	struct sde_crtc *sde_crtc;
 
 	if (!crtc) {
-		DRM_ERROR("invalid crtc\n");
+		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	sde_crtc = to_sde_crtc(crtc);
 
-	DBG("");
+	SDE_DEBUG("\n");
 
+	mutex_lock(&sde_crtc->crtc_lock);
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
+	mutex_unlock(&sde_crtc->crtc_lock);
 }
 
 static void sde_crtc_enable(struct drm_crtc *crtc)
@@ -825,7 +750,7 @@
 		return;
 	}
 
-	DBG("");
+	SDE_DEBUG("\n");
 
 	sde_crtc = to_sde_crtc(crtc);
 	mixer = sde_crtc->mixers;
@@ -850,8 +775,28 @@
 struct plane_state {
 	struct sde_plane_state *sde_pstate;
 	struct drm_plane_state *drm_pstate;
+
+	int stage;
 };
 
+static int pstate_cmp(const void *a, const void *b)
+{
+	struct plane_state *pa = (struct plane_state *)a;
+	struct plane_state *pb = (struct plane_state *)b;
+	int rc = 0;
+	int pa_zpos, pb_zpos;
+
+	pa_zpos = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
+	pb_zpos = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
+
+	if (pa_zpos != pb_zpos)
+		rc = pa_zpos - pb_zpos;
+	else
+		rc = pa->drm_pstate->crtc_x - pb->drm_pstate->crtc_x;
+
+	return rc;
+}
+
 static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -871,6 +816,12 @@
 		return -EINVAL;
 	}
 
+	if (!state->enable || !state->active) {
+		SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+				crtc->base.id, state->enable, state->active);
+		return 0;
+	}
+
 	sde_crtc = to_sde_crtc(crtc);
 	mode = &state->adjusted_mode;
 	SDE_DEBUG("%s: check", sde_crtc->name);
@@ -879,16 +830,20 @@
 
 	 /* get plane state for all drm planes associated with crtc state */
 	drm_atomic_crtc_state_for_each_plane(plane, state) {
-		pstate = state->state->plane_states[drm_plane_index(plane)];
-
-		/* plane might not have changed, in which case take
-		 * current state:
-		 */
-		if (!pstate)
-			pstate = plane->state;
+		pstate = drm_atomic_get_plane_state(state->state, plane);
+		if (IS_ERR_OR_NULL(pstate)) {
+			rc = PTR_ERR(pstate);
+			SDE_ERROR("%s: failed to get plane%d state, %d\n",
+					sde_crtc->name, plane->base.id, rc);
+			goto end;
+		}
+		if (cnt >= ARRAY_SIZE(pstates))
+			continue;
 
 		pstates[cnt].sde_pstate = to_sde_plane_state(pstate);
 		pstates[cnt].drm_pstate = pstate;
+		pstates[cnt].stage = sde_plane_get_property(
+				pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
 		cnt++;
 
 		if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h,
@@ -904,11 +859,30 @@
 		}
 	}
 
-	for (i = 0; i < cnt; i++) {
-		z_pos = sde_plane_get_property(pstates[i].sde_pstate,
-			PLANE_PROP_ZPOS);
+	if (!sde_is_custom_client()) {
+		int stage_old = 0;
 
-		if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+		/* assign mixer stages based on sorted zpos property */
+		sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+		z_pos = 0;
+		for (i = 0; i < cnt; i++) {
+			if (i && stage_old != pstates[i].stage)
+				++z_pos;
+			stage_old = pstates[i].stage;
+			pstates[i].stage = z_pos;
+		}
+	}
+
+	for (i = 0; i < cnt; i++) {
+		z_pos = pstates[i].stage;
+
+		/* verify z_pos setting before using it */
+		if (z_pos >= SDE_STAGE_MAX) {
+			SDE_ERROR("> %d plane stages assigned\n",
+					SDE_STAGE_MAX - SDE_STAGE_0);
+			rc = -EINVAL;
+			goto end;
+		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
 			if (left_crtc_zpos_cnt[z_pos] == 2) {
 				SDE_ERROR("> 2 plane @ stage%d on left\n",
 					z_pos);
@@ -925,7 +899,7 @@
 			}
 			right_crtc_zpos_cnt[z_pos]++;
 		}
-		pstates[i].sde_pstate->stage = z_pos;
+		pstates[i].sde_pstate->stage = z_pos + SDE_STAGE_0;
 		SDE_DEBUG("%s: zpos %d", sde_crtc->name, z_pos);
 	}
 
@@ -935,7 +909,6 @@
 
 int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
 {
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 	struct drm_encoder *encoder;
 	struct drm_device *dev = crtc->dev;
 
@@ -944,13 +917,7 @@
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc != crtc)
 			continue;
-		/*
-		 * Mark that framework requested vblank,
-		 * as opposed to enabling vblank only for our internal purposes
-		 * Currently this variable isn't required, but may be useful for
-		 * future features
-		 */
-		atomic_set(&sde_crtc->drm_requested_vblank, en);
+
 		MSM_EVT(crtc->dev, crtc->base.id, en);
 
 		if (en)
@@ -964,25 +931,41 @@
 	return 0;
 }
 
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	SDE_DEBUG("%s: cancel: %p", sde_crtc->name, file);
+	_sde_crtc_complete_flip(crtc, file);
+}
+
 /**
  * sde_crtc_install_properties - install all drm properties for crtc
  * @crtc: Pointer to drm crtc structure
  */
-static void sde_crtc_install_properties(struct drm_crtc *crtc)
+static void sde_crtc_install_properties(struct drm_crtc *crtc,
+				struct sde_mdss_cfg *catalog)
 {
 	struct sde_crtc *sde_crtc;
 	struct drm_device *dev;
+	struct sde_kms_info *info;
 
-	DBG("");
+	SDE_DEBUG("\n");
 
-	if (!crtc) {
-		SDE_ERROR("invalid crtc\n");
+	if (!crtc || !catalog) {
+		SDE_ERROR("invalid crtc or catalog\n");
 		return;
 	}
 
 	sde_crtc = to_sde_crtc(crtc);
 	dev = crtc->dev;
 
+	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	if (!info) {
+		SDE_ERROR("failed to allocate info memory\n");
+		return;
+	}
+
 	/* range properties */
 	msm_property_install_range(&sde_crtc->property_info,
 		"input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT,
@@ -994,6 +977,25 @@
 	msm_property_install_range(&sde_crtc->property_info,
 			"output_fence_offset", 0x0, 0, 1, 0,
 			CRTC_PROP_OUTPUT_FENCE_OFFSET);
+
+	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
+		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
+	sde_kms_info_reset(info);
+
+	sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
+	sde_kms_info_add_keyint(info, "max_linewidth",
+			catalog->max_mixer_width);
+	sde_kms_info_add_keyint(info, "max_blendstages",
+			catalog->max_mixer_blendstages);
+	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2)
+		sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
+	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
+		sde_kms_info_add_keystr(info, "qseed_type", "qseed3");
+	sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
+	msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
+			info->data, info->len, CRTC_PROP_INFO);
+
+	kfree(info);
 }
 
 /**
@@ -1026,7 +1028,12 @@
 					property);
 			if (idx == CRTC_PROP_INPUT_FENCE_TIMEOUT)
 				_sde_crtc_set_input_fence_timeout(cstate);
+		} else {
+			ret = sde_cp_crtc_set_property(crtc,
+					property, val);
 		}
+		if (ret)
+			DRM_ERROR("failed to set the property\n");
 	}
 
 	return ret;
@@ -1042,7 +1049,7 @@
 static int sde_crtc_set_property(struct drm_crtc *crtc,
 		struct drm_property *property, uint64_t val)
 {
-	DBG("");
+	SDE_DEBUG("\n");
 
 	return sde_crtc_atomic_set_property(crtc, crtc->state, property, val);
 }
@@ -1082,55 +1089,121 @@
 			ret = msm_property_atomic_get(&sde_crtc->property_info,
 					cstate->property_values,
 					cstate->property_blobs, property, val);
+			if (ret)
+				ret = sde_cp_crtc_get_property(crtc,
+					property, val);
 		}
+		if (ret)
+			DRM_ERROR("get property failed\n");
 	}
-
 	return ret;
 }
 
-static int _sde_debugfs_mixer_read(struct seq_file *s, void *data)
+static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 {
 	struct sde_crtc *sde_crtc;
+	struct sde_plane_state *pstate = NULL;
 	struct sde_crtc_mixer *m;
-	int i, j, k;
+
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_display_mode *mode;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *state;
+
+	int i, out_width;
 
 	if (!s || !s->private)
 		return -EINVAL;
 
 	sde_crtc = s->private;
+	crtc = &sde_crtc->base;
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	mode = &crtc->state->adjusted_mode;
+	out_width = sde_crtc_mixer_width(sde_crtc, mode);
+
+	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+				mode->hdisplay, mode->vdisplay);
+
+	seq_puts(s, "\n");
+
 	for (i = 0; i < sde_crtc->num_mixers; ++i) {
 		m = &sde_crtc->mixers[i];
-		if (!m->hw_lm) {
-			seq_printf(s, "Mixer[%d] has no LM\n", i);
-		} else if (!m->hw_ctl) {
-			seq_printf(s, "Mixer[%d] has no CTL\n", i);
-		} else {
-			seq_printf(s, "LM_%d/CTL_%d\n",
-					m->hw_lm->idx - LM_0,
-					m->hw_ctl->idx - CTL_0);
-		}
+		if (!m->hw_lm)
+			seq_printf(s, "\tmixer[%d] has no lm\n", i);
+		else if (!m->hw_ctl)
+			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+		else
+			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+				m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+				out_width, mode->vdisplay);
 	}
 
-	for (k = 0; k < sde_crtc->num_mixers; ++k) {
-		seq_printf(s, "Mixer[%d] stages\n", k);
-		for (i = 0; i < SDE_STAGE_MAX; ++i) {
-			if (i == SDE_STAGE_BASE)
-				seq_puts(s, "Base Stage:");
-			else
-				seq_printf(s, "Stage %d:", i - SDE_STAGE_0);
+	seq_puts(s, "\n");
 
-			for (j = 0; j < PIPES_PER_STAGE; ++j)
-				seq_printf(s, " % 2d",
-					sde_crtc->stage_cfg.stage[k][i][j]);
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		pstate = to_sde_plane_state(plane->state);
+		state = plane->state;
+
+		if (!pstate || !state)
+			continue;
+
+		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+			pstate->stage);
+
+		if (plane->state->fb) {
+			fb = plane->state->fb;
+
+			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u bpp:%d\n",
+				fb->base.id, (char *) &fb->pixel_format,
+				fb->width, fb->height, fb->bits_per_pixel);
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->modifier); i++)
+				seq_printf(s, "modifier[%d]:%8llu ", i,
+							fb->modifier[i]);
+			seq_puts(s, "\n");
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+				seq_printf(s, "pitches[%d]:%8u ", i,
+							fb->pitches[i]);
+			seq_puts(s, "\n");
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+				seq_printf(s, "offsets[%d]:%8u ", i,
+							fb->offsets[i]);
 			seq_puts(s, "\n");
 		}
+
+		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+			state->src_x, state->src_y, state->src_w, state->src_h);
+
+		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+			state->crtc_x, state->crtc_y, state->crtc_w,
+			state->crtc_h);
+		seq_puts(s, "\n");
 	}
+	mutex_unlock(&sde_crtc->crtc_lock);
+
 	return 0;
 }
 
-static int _sde_debugfs_mixer_open(struct inode *inode, struct file *file)
+static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, _sde_debugfs_mixer_read, inode->i_private);
+	return single_open(file, _sde_debugfs_status_show, inode->i_private);
+}
+
+static void sde_crtc_suspend(struct drm_crtc *crtc)
+{
+	sde_cp_crtc_suspend(crtc);
+}
+
+static void sde_crtc_resume(struct drm_crtc *crtc)
+{
+	sde_cp_crtc_resume(crtc);
 }
 
 static const struct drm_crtc_funcs sde_crtc_funcs = {
@@ -1143,11 +1216,12 @@
 	.reset = sde_crtc_reset,
 	.atomic_duplicate_state = sde_crtc_duplicate_state,
 	.atomic_destroy_state = sde_crtc_destroy_state,
+	.save = sde_crtc_suspend,
+	.restore = sde_crtc_resume,
 };
 
 static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = {
 	.mode_fixup = sde_crtc_mode_fixup,
-	.mode_set_nofb = sde_crtc_mode_set_nofb,
 	.disable = sde_crtc_disable,
 	.enable = sde_crtc_enable,
 	.atomic_check = sde_crtc_atomic_check,
@@ -1158,8 +1232,8 @@
 static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
 		struct sde_kms *sde_kms)
 {
-	static const struct file_operations debugfs_mixer_fops = {
-		.open =		_sde_debugfs_mixer_open,
+	static const struct file_operations debugfs_status_fops = {
+		.open =		_sde_debugfs_status_open,
 		.read =		seq_read,
 		.llseek =	seq_lseek,
 		.release =	single_release,
@@ -1169,17 +1243,15 @@
 				sde_debugfs_get_root(sde_kms));
 		if (sde_crtc->debugfs_root) {
 			/* don't error check these */
-			debugfs_create_file("mixers", 0444,
+			debugfs_create_file("status", 0444,
 					sde_crtc->debugfs_root,
-					sde_crtc, &debugfs_mixer_fops);
+					sde_crtc, &debugfs_status_fops);
 		}
 	}
 }
 
 /* initialize crtc */
-struct drm_crtc *sde_crtc_init(struct drm_device *dev,
-		struct drm_plane *plane,
-		int drm_crtc_id)
+struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
 {
 	struct drm_crtc *crtc = NULL;
 	struct sde_crtc *sde_crtc = NULL;
@@ -1194,9 +1266,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	crtc = &sde_crtc->base;
-
-	sde_crtc->drm_crtc_id = drm_crtc_id;
-	atomic_set(&sde_crtc->drm_requested_vblank, 0);
+	crtc->dev = dev;
 
 	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs);
 
@@ -1208,6 +1278,7 @@
 
 	/* initialize output fence support */
 	sde_fence_init(dev, &sde_crtc->output_fence, sde_crtc->name);
+	mutex_init(&sde_crtc->crtc_lock);
 
 	/* initialize debugfs support */
 	_sde_crtc_init_debugfs(sde_crtc, kms);
@@ -1218,7 +1289,8 @@
 			CRTC_PROP_COUNT, CRTC_PROP_BLOBCOUNT,
 			sizeof(struct sde_crtc_state));
 
-	sde_crtc_install_properties(crtc);
+	sde_crtc_install_properties(crtc, kms->catalog);
+	sde_cp_crtc_init(crtc);
 
 	SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
 	return crtc;
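A standalone, compilable sketch (illustration only) of the zpos-to-stage compression that sde_crtc_atomic_check() above performs for non-custom clients: plane states are sorted by their zpos property and equal zpos values collapse onto a single blend stage, so the resulting stages are contiguous from 0.

#include <stdio.h>
#include <stdlib.h>

static int cmp_zpos(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	int zpos[] = { 4, 1, 4, 7 };	/* arbitrary client-supplied zpos values */
	int stage[4];
	int i, cur = 0;

	qsort(zpos, 4, sizeof(zpos[0]), cmp_zpos);

	stage[0] = 0;
	for (i = 1; i < 4; i++) {
		if (zpos[i] != zpos[i - 1])
			cur++;		/* new zpos value -> next stage */
		stage[i] = cur;		/* equal zpos values share a stage */
	}

	for (i = 0; i < 4; i++)
		printf("zpos %d -> stage %d\n", zpos[i], stage[i]);
	/* prints 1->0, 4->1, 4->1, 7->2 */
	return 0;
}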
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index cee9572..40dff398 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,15 +19,12 @@
 #include "sde_kms.h"
 
 #define SDE_CRTC_NAME_SIZE	12
-#define PENDING_FLIP		2
-/* worst case one frame wait time based on 30 FPS : 33.33ms*/
-#define CRTC_MAX_WAIT_ONE_FRAME     34
-#define CRTC_HW_MIXER_MAXSTAGES(c, idx) ((c)->mixer[idx].sblk->maxblendstages)
 
 /**
  * struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC
  * @hw_lm:	LM HW Driver context
  * @hw_ctl:	CTL Path HW driver context
+ * @hw_dspp:	DSPP HW driver context
  * @encoder:	Encoder attached to this lm & ctl
  * @mixer_op_mode: mixer blending operation mode
  * @flush_mask:	mixer flush mask for ctl, mixer and pipe
@@ -35,6 +32,7 @@
 struct sde_crtc_mixer {
 	struct sde_hw_mixer *hw_lm;
 	struct sde_hw_ctl *hw_ctl;
+	struct sde_hw_dspp  *hw_dspp;
 	struct drm_encoder *encoder;
 	u32 mixer_op_mode;
 	u32 flush_mask;
@@ -44,13 +42,11 @@
  * struct sde_crtc - virtualized CRTC data structure
  * @base          : Base drm crtc structure
  * @name          : ASCII description of this crtc
- * @drm_crtc_id   : Id for reporting vblank. Id is relative init order into
- *                  mode_config.crtc_list and used by user space to identify
- *                  specific crtc in apis such as drm_wait_vblank
  * @num_ctls      : Number of ctl paths in use
  * @num_mixers    : Number of mixers in use
  * @mixer         : List of active mixers
- * @event         : Pointer to last received drm vblank event
+ * @event         : Pointer to last received drm vblank event. If there is a
+ *                  pending vblank event, this will be non-null.
  * @pending       : Whether or not an update is pending
  * @vsync_count   : Running count of received vsync events
  * @drm_requested_vblank : Whether vblanks have been enabled in the encoder
@@ -58,32 +54,38 @@
  * @property_defaults : Array of default values for generic property support
  * @stage_cfg     : H/w mixer stage configuration
  * @debugfs_root  : Parent of debugfs node
+ * @feature_list  : list of color processing features supported on a crtc
+ * @active_list   : list of color processing features that are active
+ * @dirty_list    : list of color processing features that are dirty
+ * @crtc_lock     : crtc lock around create, destroy and access.
  */
 struct sde_crtc {
 	struct drm_crtc base;
 	char name[SDE_CRTC_NAME_SIZE];
 
-	int drm_crtc_id;
-
 	/* HW Resources reserved for the crtc */
 	u32 num_ctls;
 	u32 num_mixers;
 	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
 
-	/*if there is a pending flip, these will be non-null */
 	struct drm_pending_vblank_event *event;
-	atomic_t pending;
 	u32 vsync_count;
-	atomic_t drm_requested_vblank;
 
 	struct msm_property_info property_info;
 	struct msm_property_data property_data[CRTC_PROP_COUNT];
+	struct drm_property_blob *blob_info;
 
 	/* output fence support */
 	struct sde_fence output_fence;
 
 	struct sde_hw_stage_cfg stage_cfg;
 	struct dentry *debugfs_root;
+
+	struct list_head feature_list;
+	struct list_head active_list;
+	struct list_head dirty_list;
+
+	struct mutex crtc_lock;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -137,4 +139,44 @@
 	return sde_crtc_mixer_width(sde_crtc, mode);
 }
 
+/**
+ * sde_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int sde_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * sde_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_prepare_fence - callback to prepare for output fences
+ * @crtc: Pointer to drm crtc object
+ */
+void sde_crtc_prepare_fence(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_init - create a new crtc object
+ * @dev: sde device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * sde_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_cancel_pending_flip - complete flip for clients on lastclose
+ * @crtc: Pointer to drm crtc object
+ * @file: file handle of the client whose flip is being cancelled
+ */
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+
 #endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 67190a3..fd57404 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,10 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
 #include "msm_drv.h"
 #include "sde_kms.h"
 #include "drm_crtc.h"
@@ -22,6 +26,13 @@
 #include "sde_formats.h"
 #include "sde_encoder_phys.h"
 #include "display_manager.h"
+#include "sde_color_processing.h"
+
+#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
+		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
+		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
 
 /*
  * Two to anticipate panels that can do cmd/vid dynamic switching
@@ -33,7 +44,8 @@
 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
 	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
 
-#define WAIT_TIMEOUT_MSEC 100
+/* Wait timeout sized on worst case of 4 60fps frames ~= 67ms */
+#define WAIT_TIMEOUT_MSEC 67
 
 /**
  * struct sde_encoder_virt - virtual encoder. Container of one or more physical
@@ -64,6 +76,9 @@
  *				kickoff. Bit0 = phys_encs[0] etc.
  * @pending_kickoff_wq:		Wait queue commit thread to wait on phys_encs
  *				become ready for kickoff in IRQ contexts
+ * @debugfs_root:		Debug file system root file node
+ * @enc_lock:			Lock around physical encoder create/destroy and
+ *				access.
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -81,6 +96,9 @@
 
 	unsigned int pending_kickoff_mask;
 	wait_queue_head_t pending_kickoff_wq;
+
+	struct dentry *debugfs_root;
+	struct mutex enc_lock;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -120,13 +138,24 @@
 
 static void bs_init(struct sde_encoder_virt *sde_enc)
 {
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
 	sde_enc->bus_scaling_client =
 	    msm_bus_scale_register_client(&mdp_bus_scale_table);
-	DBG("bus scale client: %08x", sde_enc->bus_scaling_client);
+	SDE_DEBUG_ENC(sde_enc, "bus scale client %08x\n",
+			sde_enc->bus_scaling_client);
 }
 
 static void bs_fini(struct sde_encoder_virt *sde_enc)
 {
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
 	if (sde_enc->bus_scaling_client) {
 		msm_bus_scale_unregister_client(sde_enc->bus_scaling_client);
 		sde_enc->bus_scaling_client = 0;
@@ -135,8 +164,13 @@
 
 static void bs_set(struct sde_encoder_virt *sde_enc, int idx)
 {
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
 	if (sde_enc->bus_scaling_client) {
-		DBG("set bus scaling: %d", idx);
+		SDE_DEBUG_ENC(sde_enc, "set bus scaling to %d\n", idx);
 		idx = 1;
 		msm_bus_scale_client_update_request(sde_enc->bus_scaling_client,
 						    idx);
@@ -163,14 +197,14 @@
 	struct sde_encoder_virt *sde_enc = NULL;
 	int i = 0;
 
-	DBG("");
-
 	if (!hw_res || !drm_enc || !conn_state) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+				drm_enc != 0, hw_res != 0, conn_state != 0);
 		return;
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
 
 	/* Query resources used by phys encs, expected to be without overlap */
 	memset(hw_res, 0, sizeof(*hw_res));
@@ -184,39 +218,21 @@
 	}
 }
 
-bool sde_encoder_needs_ctl_start(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	struct sde_encoder_phys *phys;
-
-	if (!drm_enc) {
-		DRM_ERROR("Invalid pointer");
-		return false;
-	}
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	phys = sde_enc->cur_master;
-
-	if (phys && phys->ops.needs_ctl_start)
-		return phys->ops.needs_ctl_start(phys);
-
-	return false;
-}
-
 static void sde_encoder_destroy(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
 	int i = 0;
 
-	DBG("");
-
 	if (!drm_enc) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
 
-	for (i = 0; i < ARRAY_SIZE(sde_enc->phys_encs); i++) {
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
 		if (phys && phys->ops.destroy) {
@@ -226,13 +242,17 @@
 		}
 	}
 
-	if (sde_enc->num_phys_encs) {
-		DRM_ERROR("Expected num_phys_encs to be 0 not %d\n",
+	if (sde_enc->num_phys_encs)
+		SDE_ERROR_ENC(sde_enc, "expected 0 num_phys_encs not %d\n",
 				sde_enc->num_phys_encs);
-	}
+	sde_enc->num_phys_encs = 0;
+	mutex_unlock(&sde_enc->enc_lock);
 
 	drm_encoder_cleanup(drm_enc);
 	bs_fini(sde_enc);
+	debugfs_remove_recursive(sde_enc->debugfs_root);
+	mutex_destroy(&sde_enc->enc_lock);
+
 	kfree(sde_enc);
 }
 
@@ -249,14 +269,15 @@
 	int i = 0;
 	int ret = 0;
 
-	DBG("");
-
 	if (!drm_enc || !crtc_state || !conn_state) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+				drm_enc != 0, crtc_state != 0, conn_state != 0);
 		return -EINVAL;
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
 	mode = &crtc_state->mode;
@@ -275,8 +296,8 @@
 				ret = -EINVAL;
 
 		if (ret) {
-			SDE_ERROR("enc %d mode unsupported, phys %d\n",
-					drm_enc->base.id, i);
+			SDE_ERROR_ENC(sde_enc,
+					"mode unsupported, phys idx %d\n", i);
 			break;
 		}
 	}
@@ -286,8 +307,11 @@
 		ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
 				conn_state, true);
 
-	/* Call to populate mode->crtc* information required by framework */
-	drm_mode_set_crtcinfo(adj_mode, 0);
+	if (!ret) {
+		sde_cp_crtc_install_properties(drm_enc->crtc);
+		/* populate mode->crtc* information required by framework */
+		drm_mode_set_crtcinfo(adj_mode, 0);
+	}
 
 	MSM_EVT(drm_enc->dev, adj_mode->flags, adj_mode->private_flags);
 
@@ -305,14 +329,14 @@
 	struct drm_connector *conn = NULL, *conn_iter;
 	int i = 0, ret;
 
-	DBG("");
-
 	if (!drm_enc) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
 	connector_list = &sde_kms->dev->mode_config.connector_list;
@@ -324,8 +348,10 @@
 			conn = conn_iter;
 
 	if (!conn) {
-		SDE_ERROR("enc %d failed to find attached connector\n",
-				drm_enc->base.id);
+		SDE_ERROR_ENC(sde_enc, "failed to find attached connector\n");
+		return;
+	} else if (!conn->state) {
+		SDE_ERROR_ENC(sde_enc, "invalid connector state\n");
 		return;
 	}
 
@@ -333,40 +359,59 @@
 	ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
 			conn->state, false);
 	if (ret) {
-		SDE_ERROR("enc %d failed to reserve hw resources, ret %d\n",
-				drm_enc->base.id, ret);
+		SDE_ERROR_ENC(sde_enc,
+				"failed to reserve hw resources, %d\n", ret);
 		return;
 	}
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (phys && phys->ops.mode_set)
-			phys->ops.mode_set(phys, mode, adj_mode);
+		if (phys) {
+			phys->connector = conn->state->connector;
+			if (phys->ops.mode_set)
+				phys->ops.mode_set(phys, mode, adj_mode);
+		}
 	}
 }
 
 static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
 	int i = 0;
 
-	DBG("");
-
 	if (!drm_enc) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid encoder\n");
+		return;
+	} else if (!drm_enc->dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	} else if (!drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid dev_private\n");
 		return;
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
 	MSM_EVT(drm_enc->dev, 0, 0);
 
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
 	bs_set(sde_enc, 1);
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
 		if (phys) {
+			atomic_set(&phys->vsync_cnt, 0);
+			atomic_set(&phys->underrun_cnt, 0);
+
 			if (phys->ops.enable)
 				phys->ops.enable(phys);
 
@@ -376,7 +421,8 @@
 			 * the encoder role found at panel probe time
 			 */
 			if (phys->ops.is_master && phys->ops.is_master(phys)) {
-				DBG("phys enc master is now idx %d", i);
+				SDE_DEBUG_ENC(sde_enc,
+						"master is now idx %d\n", i);
 				sde_enc->cur_master = phys;
 			}
 		}
@@ -390,14 +436,20 @@
 	struct sde_kms *sde_kms;
 	int i = 0;
 
-	DBG("");
-
 	if (!drm_enc) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid encoder\n");
+		return;
+	} else if (!drm_enc->dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	} else if (!drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid dev_private\n");
 		return;
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
 
@@ -406,16 +458,25 @@
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (phys && phys->ops.disable)
-			phys->ops.disable(phys);
+		if (phys) {
+			if (phys->ops.disable && !phys->ops.is_master(phys))
+				phys->ops.disable(phys);
+			phys->connector = NULL;
+		}
 	}
 
+	if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
+		sde_enc->cur_master->ops.disable(sde_enc->cur_master);
+
 	sde_enc->cur_master = NULL;
-	DBG("clear phys enc master");
+	SDE_DEBUG_ENC(sde_enc, "cleared master\n");
 
 	bs_set(sde_enc, 0);
+	sde_cp_crtc_destroy_properties(drm_enc->crtc);
 
 	sde_rm_release(&sde_kms->rm, drm_enc);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 }
 
 static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
@@ -434,8 +495,6 @@
 {
 	int i = 0;
 
-	DBG("");
-
 	for (i = 0; i < catalog->intf_count; i++) {
 		if (catalog->intf[i].type == type
 		    && catalog->intf[i].controller_id == controller_id) {
@@ -455,15 +514,14 @@
 	return WB_MAX;
 }
 
-static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc)
+static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *phy_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
 	unsigned long lock_flags;
 
-	if (!drm_enc) {
-		DRM_ERROR("Invalid pointer");
+	if (!drm_enc || !phy_enc)
 		return;
-	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
@@ -471,6 +529,17 @@
 	if (sde_enc->crtc_vblank_cb)
 		sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data);
 	spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+
+	atomic_inc(&phy_enc->vsync_cnt);
+}
+
+static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *phy_enc)
+{
+	if (!phy_enc)
+		return;
+
+	atomic_inc(&phy_enc->underrun_cnt);
 }
 
 void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
@@ -483,6 +552,11 @@
 
 	enable = vbl_cb ? true : false;
 
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_ENC(sde_enc, "\n");
 	MSM_EVT(drm_enc->dev, enable, 0);
 
 	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
@@ -521,8 +595,121 @@
 	wake_up_all(&sde_enc->pending_kickoff_wq);
 }
 
-void sde_encoder_schedule_kickoff(struct drm_encoder *drm_enc,
-		void (*kickoff_cb)(void *), void *kickoff_data)
+/**
+ * _sde_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+	struct sde_hw_ctl *ctl;
+
+	if (!drm_enc || !phys) {
+		SDE_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+				drm_enc != 0, phys != 0);
+		return;
+	}
+
+	ctl = phys->hw_ctl;
+	if (!ctl || !ctl->ops.trigger_flush) {
+		SDE_ERROR("missing trigger cb\n");
+		return;
+	}
+
+	if (extra_flush_bits && ctl->ops.update_pending_flush)
+		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+	ctl->ops.trigger_flush(ctl);
+	MSM_EVT(drm_enc->dev, drm_enc->base.id, ctl->idx);
+}
+
+/**
+ * _sde_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
+{
+	if (!phys) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
+		phys->ops.trigger_start(phys);
+}
+
+void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_ctl *ctl;
+	int ctl_idx = -1;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	if (ctl && ctl->ops.trigger_start) {
+		ctl->ops.trigger_start(ctl);
+		ctl_idx = ctl->idx;
+	}
+
+	if (phys_enc && phys_enc->parent)
+		MSM_EVT(phys_enc->parent->dev,
+				phys_enc->parent->base.id,
+				ctl_idx);
+}
+
+/**
+ * _sde_encoder_kickoff_phys - handle physical encoder kickoff
+ *	Iterate through the physical encoders and perform consolidated flush
+ *	and/or control start triggering as needed. This is done in the virtual
+ *	encoder rather than the individual physical ones in order to handle
+ *	use cases that require visibility into multiple physical encoders at
+ *	a time.
+ * @sde_enc: Pointer to virtual encoder structure
+ */
+static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
+{
+	struct sde_hw_ctl *ctl;
+	uint32_t i, pending_flush;
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	pending_flush = 0x0;
+
+	/* don't perform flush/start operations for slave encoders */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		ctl = phys->hw_ctl;
+		if (!ctl || phys->enable_state == SDE_ENC_DISABLED)
+			continue;
+
+		if (!phys->ops.needs_split_flush ||
+				!phys->ops.needs_split_flush(phys))
+			_sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
+		else if (ctl->ops.get_pending_flush)
+			pending_flush |= ctl->ops.get_pending_flush(ctl);
+	}
+
+	/* for split flush, combine pending flush masks and send to master */
+	if (pending_flush && sde_enc->cur_master) {
+		_sde_encoder_trigger_flush(
+				&sde_enc->base,
+				sde_enc->cur_master,
+				pending_flush);
+	}
+
+	_sde_encoder_trigger_start(sde_enc->cur_master);
+}
+
+void sde_encoder_schedule_kickoff(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
@@ -532,11 +719,12 @@
 	int ret;
 
 	if (!drm_enc) {
-		DRM_ERROR("invalid arguments");
+		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
+	SDE_DEBUG_ENC(sde_enc, "\n");
 	MSM_EVT(drm_enc->dev, 0, 0);
 
 	spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
@@ -572,12 +760,12 @@
 				msecs_to_jiffies(WAIT_TIMEOUT_MSEC));
 		spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
 		if (!ret)
-			DBG("wait %u msec timed out", WAIT_TIMEOUT_MSEC);
+			SDE_DEBUG_ENC(sde_enc, "wait %ums timed out\n",
+					WAIT_TIMEOUT_MSEC);
 	}
 
 	/* All phys encs are ready to go, trigger the kickoff */
-	if (kickoff_cb)
-		kickoff_cb(kickoff_data);
+	_sde_encoder_kickoff_phys(sde_enc);
 
 	/* Allow phys encs to handle any post-kickoff business */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -588,6 +776,85 @@
 	}
 }
 
+static int _sde_encoder_status_show(struct seq_file *s, void *data)
+{
+	struct sde_encoder_virt *sde_enc;
+	int i;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	sde_enc = s->private;
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys)
+			continue;
+
+		seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
+				phys->intf_idx - INTF_0,
+				atomic_read(&phys->vsync_cnt),
+				atomic_read(&phys->underrun_cnt));
+
+		switch (phys->intf_mode) {
+		case INTF_MODE_VIDEO:
+			seq_puts(s, "mode: video\n");
+			break;
+		case INTF_MODE_CMD:
+			seq_puts(s, "mode: command\n");
+			break;
+		case INTF_MODE_WB_BLOCK:
+			seq_puts(s, "mode: wb block\n");
+			break;
+		case INTF_MODE_WB_LINE:
+			seq_puts(s, "mode: wb line\n");
+			break;
+		default:
+			seq_puts(s, "mode: ???\n");
+			break;
+		}
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+
+	return 0;
+}
+
+static int _sde_encoder_debugfs_status_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _sde_encoder_status_show, inode->i_private);
+}
+
+static void _sde_encoder_init_debugfs(struct drm_encoder *drm_enc,
+	struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms)
+{
+	static const struct file_operations debugfs_status_fops = {
+		.open =		_sde_encoder_debugfs_status_open,
+		.read =		seq_read,
+		.llseek =	seq_lseek,
+		.release =	single_release,
+	};
+	char name[SDE_NAME_SIZE];
+
+	if (!drm_enc || !sde_enc || !sde_kms) {
+		SDE_ERROR("invalid encoder or kms\n");
+		return;
+	}
+
+	snprintf(name, SDE_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+	/* create overall sub-directory for the encoder */
+	sde_enc->debugfs_root = debugfs_create_dir(name,
+					sde_debugfs_get_root(sde_kms));
+	if (sde_enc->debugfs_root) {
+		/* don't error check these */
+		debugfs_create_file("status", 0644,
+			sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
+	}
+}
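For reference, each interface line written by _sde_encoder_status_show() above follows the seq_printf()/seq_puts() formats directly. A hypothetical single-interface, video-mode encoder would therefore read roughly as follows (counter values are illustrative only):

	intf:1    vsync:    1234     underrun:       0    mode: video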
+
 static int sde_encoder_virt_add_phys_encs(
 		u32 display_caps,
 		struct sde_encoder_virt *sde_enc,
@@ -595,7 +862,7 @@
 {
 	struct sde_encoder_phys *enc = NULL;
 
-	DBG("");
+	SDE_DEBUG_ENC(sde_enc, "\n");
 
 	/*
 	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
@@ -603,7 +870,7 @@
 	 */
 	if (sde_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
 			ARRAY_SIZE(sde_enc->phys_encs)) {
-		DRM_ERROR("Too many physical encoders %d, unable to add\n",
+		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
 			  sde_enc->num_phys_encs);
 		return -EINVAL;
 	}
@@ -612,7 +879,7 @@
 		enc = sde_encoder_phys_vid_init(params);
 
 		if (IS_ERR_OR_NULL(enc)) {
-			DRM_ERROR("Failed to initialize phys vid enc: %ld\n",
+			SDE_ERROR_ENC(sde_enc, "failed to init vid enc: %ld\n",
 				PTR_ERR(enc));
 			return enc == 0 ? -EINVAL : PTR_ERR(enc);
 		}
@@ -625,7 +892,7 @@
 		enc = sde_encoder_phys_cmd_init(params);
 
 		if (IS_ERR_OR_NULL(enc)) {
-			DRM_ERROR("Failed to initialize phys cmd enc: %ld\n",
+			SDE_ERROR_ENC(sde_enc, "failed to init cmd enc: %ld\n",
 				PTR_ERR(enc));
 			return enc == 0 ? -EINVAL : PTR_ERR(enc);
 		}
@@ -642,10 +909,15 @@
 {
 	struct sde_encoder_phys *enc = NULL;
 
-	DBG("");
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
 
 	if (sde_enc->num_phys_encs + 1 >= ARRAY_SIZE(sde_enc->phys_encs)) {
-		DRM_ERROR("Too many physical encoders %d, unable to add\n",
+		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
 			  sde_enc->num_phys_encs);
 		return -EINVAL;
 	}
@@ -653,7 +925,7 @@
 	enc = sde_encoder_phys_wb_init(params);
 
 	if (IS_ERR_OR_NULL(enc)) {
-		DRM_ERROR("Failed to initialize phys wb enc: %ld\n",
+		SDE_ERROR_ENC(sde_enc, "failed to init wb enc: %ld\n",
 			PTR_ERR(enc));
 		return enc == 0 ? -EINVAL : PTR_ERR(enc);
 	}
@@ -674,16 +946,23 @@
 	enum sde_intf_type intf_type;
 	struct sde_encoder_virt_ops parent_ops = {
 		sde_encoder_vblank_callback,
+		sde_encoder_underrun_callback,
 		sde_encoder_handle_phys_enc_ready_for_kickoff
 	};
 	struct sde_enc_phys_init_params phys_params;
 
+	if (!sde_enc || !sde_kms) {
+		SDE_ERROR("invalid arg(s), enc %d kms %d\n",
+				sde_enc != 0, sde_kms != 0);
+		return -EINVAL;
+	}
+
 	memset(&phys_params, 0, sizeof(phys_params));
 	phys_params.sde_kms = sde_kms;
 	phys_params.parent = &sde_enc->base;
 	phys_params.parent_ops = parent_ops;
 
-	DBG("");
+	SDE_DEBUG("\n");
 
 	if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
 		*drm_enc_mode = DRM_MODE_ENCODER_DSI;
@@ -695,7 +974,7 @@
 		*drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
 		intf_type = INTF_WB;
 	} else {
-		DRM_ERROR("Unsupported display interface type");
+		SDE_ERROR_ENC(sde_enc, "unsupported display interface type\n");
 		return -EINVAL;
 	}
 
@@ -703,8 +982,9 @@
 
 	sde_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
 
-	DBG("dsi_info->num_of_h_tiles %d", disp_info->num_of_h_tiles);
+	SDE_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
 
+	mutex_lock(&sde_enc->enc_lock);
 	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
 		/*
 		 * Left-most tile is at index 0, content is controller id
@@ -722,7 +1002,7 @@
 			phys_params.split_role = ENC_ROLE_SOLO;
 		}
 
-		DBG("h_tile_instance %d = %d, split_role %d",
+		SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
 				i, controller_id, phys_params.split_role);
 
 		if (intf_type == INTF_WB) {
@@ -731,8 +1011,8 @@
 					sde_kms->catalog,
 					intf_type, controller_id);
 			if (phys_params.wb_idx == WB_MAX) {
-				DRM_ERROR(
-					"Error: could not get writeback: type %d, id %d\n",
+				SDE_ERROR_ENC(sde_enc,
+					"could not get wb: type %d, id %d\n",
 					intf_type, controller_id);
 				ret = -EINVAL;
 			}
@@ -742,8 +1022,8 @@
 					sde_kms->catalog, intf_type,
 					controller_id);
 			if (phys_params.intf_idx == INTF_MAX) {
-				DRM_ERROR(
-					"Error: could not get writeback: type %d, id %d\n",
+				SDE_ERROR_ENC(sde_enc,
+					"could not get intf: type %d, id %d\n",
 					intf_type, controller_id);
 				ret = -EINVAL;
 			}
@@ -759,9 +1039,11 @@
 						sde_enc,
 						&phys_params);
 			if (ret)
-				DRM_ERROR("Failed to add phys encs\n");
+				SDE_ERROR_ENC(sde_enc,
+						"failed to add phys encs\n");
 		}
 	}
+	mutex_unlock(&sde_enc->enc_lock);
 
 
 	return ret;
@@ -777,14 +1059,13 @@
 	int drm_enc_mode = DRM_MODE_ENCODER_NONE;
 	int ret = 0;
 
-	DBG("");
-
 	sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
 	if (!sde_enc) {
 		ret = -ENOMEM;
 		goto fail;
 	}
 
+	mutex_init(&sde_enc->enc_lock);
 	ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
 			&drm_enc_mode);
 	if (ret)
@@ -799,12 +1080,14 @@
 	sde_enc->pending_kickoff_mask = 0;
 	init_waitqueue_head(&sde_enc->pending_kickoff_wq);
 
-	DBG("Created encoder");
+	_sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms);
+
+	SDE_DEBUG_ENC(sde_enc, "created\n");
 
 	return drm_enc;
 
 fail:
-	DRM_ERROR("Failed to create encoder\n");
+	SDE_ERROR("failed to create encoder\n");
 	if (drm_enc)
 		sde_encoder_destroy(drm_enc);
 
@@ -816,13 +1099,12 @@
 	struct sde_encoder_virt *sde_enc = NULL;
 	int i, ret = 0;
 
-	DBG("");
-
 	if (!drm_enc) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid encoder\n");
 		return -EINVAL;
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -847,27 +1129,28 @@
 	u32 i = 0;
 	u32 num_displays = 0;
 
-	DBG("");
+	SDE_DEBUG("\n");
 
 	if (!dev || !dev->dev_private) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid device %d\n", dev != 0);
 		return;
 	}
 
 	priv = dev->dev_private;
 	priv->num_encoders = 0;
 	if (!priv->kms || !priv->dm) {
-		DRM_ERROR("Invalid pointer");
+		SDE_ERROR("invalid priv pointer, kms %d dm %d\n",
+				priv->kms != 0, priv->dm != 0);
 		return;
 	}
 	disp_man = priv->dm;
 
 	num_displays = display_manager_get_count(disp_man);
-	DBG("num_displays %d", num_displays);
+	SDE_DEBUG("num_displays %d\n", num_displays);
 
 	if (num_displays > ARRAY_SIZE(priv->encoders)) {
 		num_displays = ARRAY_SIZE(priv->encoders);
-		DRM_ERROR("Too many displays found, capping to %d",
+		SDE_ERROR("too many displays found, capping to %d\n",
 				num_displays);
 	}
 
@@ -878,19 +1161,19 @@
 
 		ret = display_manager_get_info_by_index(disp_man, i, &info);
 		if (ret) {
-			DRM_ERROR("Failed to get display info, %d", ret);
+			SDE_ERROR("failed to get display info, %d\n", ret);
 			return;
 		}
 
 		enc = sde_encoder_virt_init(dev, &info);
 		if (IS_ERR_OR_NULL(enc)) {
-			DRM_ERROR("Encoder initialization failed");
+			SDE_ERROR("encoder initialization failed\n");
 			return;
 		}
 
 		ret = display_manager_drm_init_by_index(disp_man, i, enc);
 		if (ret) {
-			DRM_ERROR("Display drm_init failed, %d", ret);
+			SDE_ERROR("display drm_init failed, %d\n", ret);
 			return;
 		}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 026c611..22d187e 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -46,11 +46,16 @@
  *	provides for the physical encoders to use to callback.
  * @handle_vblank_virt:	Notify virtual encoder of vblank IRQ reception
  *			Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ *			Note: This is called from IRQ handler context.
  * @handle_ready_for_kickoff:	Notify virtual encoder that this phys encoder
  *				is now ready for the next kickoff.
  */
 struct sde_encoder_virt_ops {
-	void (*handle_vblank_virt)(struct drm_encoder *);
+	void (*handle_vblank_virt)(struct drm_encoder *,
+			struct sde_encoder_phys *phys);
+	void (*handle_underrun_virt)(struct drm_encoder *,
+			struct sde_encoder_phys *phys);
 	void (*handle_ready_for_kickoff)(struct drm_encoder *,
 			struct sde_encoder_phys *phys);
 };
@@ -79,7 +84,8 @@
  *				triggering the next kickoff
  *				(ie for previous tx to complete)
  * @handle_post_kickoff:	Do any necessary post-kickoff work
- * @needs_ctl_start:		Whether encoder type needs ctl_start
+ * @trigger_start:		Process start event on physical encoder
+ * @needs_split_flush:		Whether encoder type needs split flush
  */
 
 struct sde_encoder_phys_ops {
@@ -104,7 +110,8 @@
 	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
 			bool *wait_until_ready);
 	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
-	bool (*needs_ctl_start)(struct sde_encoder_phys *phys_enc);
+	void (*trigger_start)(struct sde_encoder_phys *phys_enc);
+	bool (*needs_split_flush)(struct sde_encoder_phys *phys_enc);
 };
 
 /**
@@ -121,10 +128,26 @@
 };
 
 /**
+ * enum sde_intr_idx - sde encoder interrupt index
+ * @INTR_IDX_VSYNC:    Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
+ */
+enum sde_intr_idx {
+	INTR_IDX_VSYNC,
+	INTR_IDX_PINGPONG,
+	INTR_IDX_UNDERRUN,
+	INTR_IDX_RDPTR,
+	INTR_IDX_MAX,
+};
+
+/**
  * struct sde_encoder_phys - physical encoder that drives a single INTF block
  *	tied to a specific panel / sub-panel. Abstract type, sub-classed by
  *	phys_vid or phys_cmd for video mode or command mode encs respectively.
  * @parent:		Pointer to the containing virtual encoder
+ * @connector:		If a mode is set, cached pointer to the active connector
  * @ops:		Operations exposed to the virtual encoder
  * @parent_ops:		Callbacks exposed by the parent to the phys_enc
  * @hw_mdptop:		Hardware interface to the top registers
@@ -136,12 +159,16 @@
  * @enabled:		Whether the encoder has enabled and running a mode
  * @split_role:		Role to play in a split-panel configuration
  * @intf_mode:		Interface mode
+ * @intf_idx:		Interface index on sde hardware
  * @spin_lock:		Lock for IRQ purposes
- * @mode_3d:		3D mux configuration
  * @enable_state:	Enable state tracking
+ * @vblank_refcount:	Reference count of vblank request
+ * @vsync_cnt:		Vsync count for the physical encoder
+ * @underrun_cnt:	Underrun count for the physical encoder
  */
 struct sde_encoder_phys {
 	struct drm_encoder *parent;
+	struct drm_connector *connector;
 	struct sde_encoder_phys_ops ops;
 	struct sde_encoder_virt_ops parent_ops;
 	struct sde_hw_mdp *hw_mdptop;
@@ -152,9 +179,12 @@
 	struct drm_display_mode cached_mode;
 	enum sde_enc_split_role split_role;
 	enum sde_intf_mode intf_mode;
+	enum sde_intf intf_idx;
 	spinlock_t spin_lock;
-	enum sde_3d_blend_mode mode_3d;
 	enum sde_enc_enable_state enable_state;
+	atomic_t vblank_refcount;
+	atomic_t vsync_cnt;
+	atomic_t underrun_cnt;
 };
 
 /**
@@ -167,7 +197,7 @@
  */
 struct sde_encoder_phys_vid {
 	struct sde_encoder_phys base;
-	int irq_idx;
+	int irq_idx[INTR_IDX_MAX];
 	struct sde_hw_intf *hw_intf;
 	struct completion vblank_completion;
 };
@@ -195,8 +225,7 @@
 	int intf_idx;
 	int stream_sel;
 	struct sde_hw_pingpong *hw_pp;
-	int pp_rd_ptr_irq_idx;
-	int pp_tx_done_irq_idx;
+	int irq_idx[INTR_IDX_MAX];
 	wait_queue_head_t pp_tx_done_wq;
 	atomic_t pending_cnt;
 };
@@ -301,4 +330,26 @@
 		struct drm_framebuffer *fb, const struct sde_format *format,
 		struct sde_rect *wb_roi);
 
+/**
+ * sde_encoder_helper_trigger_start - control start helper function
+ *	This helper function may be optionally specified by physical
+ *	encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc);
+
+static inline enum sde_3d_blend_mode sde_encoder_helper_get_3d_blend_mode(
+		struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (phys_enc->split_role == ENC_ROLE_SOLO &&
+			topology == SDE_RM_TOPOLOGY_DUALPIPEMERGE)
+		return BLEND_3D_H_ROW_INT;
+
+	return BLEND_3D_NONE;
+}
+
 #endif /* __sde_encoder_phys_H__ */
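As a usage sketch (mirroring the command-mode encoder changes later in this patch; the struct sde_hw_intf_cfg field names are taken from that code), a physical encoder lets the helper pick the 3D mux mode while programming its CTL interface configuration:

	struct sde_hw_intf_cfg intf_cfg = { 0 };

	/* BLEND_3D_H_ROW_INT only for a solo encoder driving a
	 * dual-pipe-merge topology; BLEND_3D_NONE otherwise.
	 */
	intf_cfg.intf = phys_enc->intf_idx;
	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);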
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 95011d9..3f478fa 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
  *
  */
 
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/jiffies.h>
 
 #include "sde_encoder_phys.h"
@@ -19,6 +20,16 @@
 #include "sde_core_irq.h"
 #include "sde_formats.h"
 
+#define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
 #define to_sde_encoder_phys_cmd(x) \
 	container_of(x, struct sde_encoder_phys_cmd, base)
 
@@ -45,6 +56,8 @@
 		const struct drm_display_mode *mode,
 		struct drm_display_mode *adj_mode)
 {
+	if (phys_enc)
+		SDE_DEBUG_CMDENC(to_sde_encoder_phys_cmd(phys_enc), "\n");
 	return true;
 }
 
@@ -59,8 +72,13 @@
 	struct sde_rm_hw_iter iter;
 	int i, instance;
 
+	if (!phys_enc || !mode || !adj_mode) {
+		SDE_ERROR("invalid arg(s), enc %d mode %d adj_mode %d\n",
+				phys_enc != 0, mode != 0, adj_mode != 0);
+		return;
+	}
 	phys_enc->cached_mode = *adj_mode;
-	SDE_DEBUG("intf %d, caching mode:\n", cmd_enc->intf_idx);
+	SDE_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
 	drm_mode_debug_printmodeline(adj_mode);
 
 	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
@@ -74,7 +92,8 @@
 	}
 
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
-		SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl));
+		SDE_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
 		phys_enc->hw_ctl = NULL;
 		return;
 	}
@@ -88,7 +107,7 @@
 	}
 
 	if (IS_ERR_OR_NULL(cmd_enc->hw_pp)) {
-		SDE_ERROR("failed init pingpong: %ld\n",
+		SDE_ERROR_CMDENC(cmd_enc, "failed to init pingpong: %ld\n",
 				PTR_ERR(cmd_enc->hw_pp));
 		cmd_enc->hw_pp = NULL;
 		phys_enc->hw_ctl = NULL;
@@ -100,9 +119,13 @@
 static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys_cmd *cmd_enc = arg;
-	struct sde_encoder_phys *phys_enc = &cmd_enc->base;
+	struct sde_encoder_phys *phys_enc;
 	int new_pending_cnt;
 
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
 	new_pending_cnt = atomic_dec_return(&cmd_enc->pending_cnt);
 	MSM_EVT(DEV(phys_enc), cmd_enc->hw_pp->idx, new_pending_cnt);
 
@@ -110,7 +133,8 @@
 	wake_up_all(&cmd_enc->pp_tx_done_wq);
 
 	/* Trigger a pending flush */
-	phys_enc->parent_ops.handle_ready_for_kickoff(phys_enc->parent,
+	if (phys_enc->parent_ops.handle_ready_for_kickoff)
+		phys_enc->parent_ops.handle_ready_for_kickoff(phys_enc->parent,
 			phys_enc);
 }
 
@@ -119,28 +143,48 @@
 	struct sde_encoder_phys_cmd *cmd_enc = arg;
 	struct sde_encoder_phys *phys_enc = &cmd_enc->base;
 
-	phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
+	if (!cmd_enc)
+		return;
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+			phys_enc);
 }
 
-static int sde_encoder_phys_cmd_register_pp_irq(
-		struct sde_encoder_phys *phys_enc,
-		enum sde_intr_type intr_type,
-		int *irq_idx,
-		void (*irq_func)(void *, int),
-		const char *irq_name)
+static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	if (phys_enc->parent_ops.handle_underrun_virt)
+		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static int sde_encoder_phys_cmd_register_irq(struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int *irq_idx,
+	void (*irq_func)(void *, int), const char *irq_name)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_irq_callback irq_cb;
 	int ret = 0;
 
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
 	*irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms, intr_type,
 			cmd_enc->hw_pp->idx);
 	if (*irq_idx < 0) {
-		DRM_ERROR(
-			"Failed to lookup IRQ index for %s with pp=%d",
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to lookup IRQ index for %s with pp=%d\n",
 			irq_name,
-			cmd_enc->hw_pp->idx);
+			cmd_enc->hw_pp->idx - PINGPONG_0);
 		return -EINVAL;
 	}
 
@@ -149,16 +193,18 @@
 	ret = sde_core_irq_register_callback(phys_enc->sde_kms, *irq_idx,
 			&irq_cb);
 	if (ret) {
-		DRM_ERROR("Failed to register IRQ callback %s", irq_name);
+		SDE_ERROR_CMDENC(cmd_enc,
+				"failed to register IRQ callback %s\n",
+				irq_name);
 		return ret;
 	}
 
 	ret = sde_core_irq_enable(phys_enc->sde_kms, irq_idx, 1);
 	if (ret) {
-		DRM_ERROR(
-			"Failed to enable IRQ for %s, pp %d, irq_idx=%d",
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to enable IRQ for %s, pp %d, irq_idx %d\n",
 			irq_name,
-			cmd_enc->hw_pp->idx,
+			cmd_enc->hw_pp->idx - PINGPONG_0,
 			*irq_idx);
 		*irq_idx = -EINVAL;
 
@@ -168,27 +214,32 @@
 		return ret;
 	}
 
-	DBG("registered IRQ %s for pp %d, irq_idx=%d",
+	SDE_DEBUG_CMDENC(cmd_enc, "registered IRQ %s for pp %d, irq_idx %d\n",
 			irq_name,
-			cmd_enc->hw_pp->idx,
+			cmd_enc->hw_pp->idx - PINGPONG_0,
 			*irq_idx);
 
 	return ret;
 }
 
-static int sde_encoder_phys_cmd_unregister_pp_irq(
+static int sde_encoder_phys_cmd_unregister_irq(
 		struct sde_encoder_phys *phys_enc,
 		int irq_idx)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
 
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
 	sde_core_irq_disable(phys_enc->sde_kms, &irq_idx, 1);
 	sde_core_irq_register_callback(phys_enc->sde_kms, irq_idx,
 			NULL);
 
-	DBG("unregister IRQ for pp %d, irq_idx=%d\n",
-			cmd_enc->hw_pp->idx,
+	SDE_DEBUG_CMDENC(cmd_enc, "unregistered IRQ for pp %d, irq_idx %d\n",
+			cmd_enc->hw_pp->idx - PINGPONG_0,
 			irq_idx);
 
 	return 0;
@@ -206,11 +257,16 @@
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
 
-	DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", cmd_enc->hw_pp->idx - PINGPONG_0);
 
 	if (!cmd_enc->hw_pp->ops.setup_tearcheck ||
 		!cmd_enc->hw_pp->ops.enable_tearcheck) {
-		DBG("tearcheck unsupported");
+		SDE_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
 		return;
 	}
 
@@ -227,7 +283,7 @@
 	 */
 	vsync_hz = sde_power_clk_get_rate(&priv->phandle, "vsync_clk");
 	if (!vsync_hz) {
-		DBG("invalid vsync clock rate");
+		SDE_DEBUG_CMDENC(cmd_enc, "invalid vsync clock rate\n");
 		return;
 	}
 
@@ -246,16 +302,21 @@
 	tc_cfg.start_pos = mode->vdisplay;
 	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
 
-	DBG("tc %d vsync_clk_speed_hz %u mode->vtotal %u mode->vrefresh %u",
-		cmd_enc->hw_pp->idx, vsync_hz, mode->vtotal, mode->vrefresh);
-	DBG("tc %d enable %u start_pos %u rd_ptr_irq %u",
-		tc_enable, cmd_enc->hw_pp->idx, tc_cfg.start_pos,
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+		cmd_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+		mode->vtotal, mode->vrefresh);
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+		cmd_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
 		tc_cfg.rd_ptr_irq);
-	DBG("tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u",
-		cmd_enc->hw_pp->idx, tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
-		tc_cfg.vsync_init_val);
-	DBG("tc %d sync_cfgheight %u sync_thresh_start %u sync_thresh_cont %u",
-		cmd_enc->hw_pp->idx, tc_cfg.sync_cfg_height,
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+		cmd_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+		cmd_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
 		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
 
 	cmd_enc->hw_pp->ops.setup_tearcheck(cmd_enc->hw_pp, &tc_cfg);
@@ -269,22 +330,32 @@
 		to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_hw_intf_cfg intf_cfg = { 0 };
 
-	if (!phys_enc->hw_ctl->ops.setup_intf_cfg)
+	if (!phys_enc || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		SDE_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
 		return;
+	}
 
-	DBG("intf %d pp %d, enabling mode:", cmd_enc->intf_idx,
-			cmd_enc->hw_pp->idx);
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+			cmd_enc->hw_pp->idx - PINGPONG_0);
 	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
 
 	intf_cfg.intf = cmd_enc->intf_idx;
-	intf_cfg.mode_3d = phys_enc->mode_3d;
 	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
 	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
 	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
 
 	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
 }
 
+static bool sde_encoder_phys_cmd_needs_split_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	return false;
+}
+
 static void sde_encoder_phys_cmd_split_config(
 		struct sde_encoder_phys *phys_enc, bool enable)
 {
@@ -293,12 +364,17 @@
 	struct sde_hw_mdp *hw_mdptop = phys_enc->hw_mdptop;
 	struct split_pipe_cfg cfg = { 0 };
 
-	DBG("enable %d", enable);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "enable %d\n", enable);
 
 	cfg.en = enable;
 	cfg.mode = INTF_MODE_CMD;
 	cfg.intf = cmd_enc->intf_idx;
-	cfg.split_flush_en = enable;
+	cfg.split_flush_en = enable &&
+		sde_encoder_phys_cmd_needs_split_flush(phys_enc);
 
 	if (hw_mdptop && hw_mdptop->ops.setup_split_pipe)
 		hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
@@ -312,24 +388,46 @@
 		to_sde_encoder_phys_cmd(phys_enc);
 	int ret = 0;
 
-	DBG("enable %d", enable);
-
-	/* Slave encoders don't report vblank */
-	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-		if (enable)
-			ret = sde_encoder_phys_cmd_register_pp_irq(phys_enc,
-					SDE_IRQ_TYPE_PING_PONG_RD_PTR,
-					&cmd_enc->pp_rd_ptr_irq_idx,
-					sde_encoder_phys_cmd_pp_rd_ptr_irq,
-					"pp_rd_ptr");
-		else
-			ret = sde_encoder_phys_cmd_unregister_pp_irq(phys_enc,
-					cmd_enc->pp_rd_ptr_irq_idx);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
 	}
 
+	/* Slave encoders don't report vblank */
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		goto end;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
+			__builtin_return_address(0),
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	MSM_EVTMSG(phys_enc->parent->dev, NULL, enable,
+			atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+				&cmd_enc->irq_idx[INTR_IDX_PINGPONG],
+				sde_encoder_phys_cmd_pp_rd_ptr_irq,
+				"pp_rd_ptr");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				cmd_enc->irq_idx[INTR_IDX_PINGPONG]);
+
+	if (enable)
+		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+			SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+			&cmd_enc->irq_idx[INTR_IDX_RDPTR],
+			sde_encoder_phys_cmd_pp_rd_ptr_irq, "pp_rd_ptr");
+	else
+		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
+			cmd_enc->irq_idx[INTR_IDX_RDPTR]);
+
+end:
 	if (ret)
-		DRM_ERROR("control vblank irq error %d, enable %d\n", ret,
-				enable);
+		SDE_ERROR_CMDENC(cmd_enc,
+				"control vblank irq error %d, enable %d\n",
+				ret, enable);
 
 	return ret;
 }
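The enable path above only touches hardware on the 0 -> 1 and 1 -> 0 transitions of vblank_refcount. A minimal standalone sketch of that pattern, where register_vblank_irq()/unregister_vblank_irq() are hypothetical stand-ins for the register/unregister helpers above:

	static int example_control_vblank_irq(atomic_t *refcount, bool enable)
	{
		int ret = 0;

		/* first enable registers the IRQ, last disable removes it */
		if (enable && atomic_inc_return(refcount) == 1)
			ret = register_vblank_irq();
		else if (!enable && atomic_dec_return(refcount) == 0)
			ret = unregister_vblank_irq();

		return ret;
	}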
@@ -338,15 +436,19 @@
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+	struct sde_hw_ctl *ctl;
 	u32 flush_mask;
 	int ret = 0;
 
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", cmd_enc->hw_pp->idx - PINGPONG_0);
+
 	if (WARN_ON(phys_enc->enable_state == SDE_ENC_ENABLED))
 		return;
 
-	DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
-
 	/*
 	 * Only master configures master/slave configuration, so no slave check
 	 * In solo configuration, solo encoder needs to program no-split
@@ -359,28 +461,40 @@
 	sde_encoder_phys_cmd_pingpong_config(phys_enc);
 
 	/* Both master and slave need to register for pp_tx_done */
-	ret = sde_encoder_phys_cmd_register_pp_irq(phys_enc,
+	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
 			SDE_IRQ_TYPE_PING_PONG_COMP,
-			&cmd_enc->pp_tx_done_irq_idx,
+			&cmd_enc->irq_idx[INTR_IDX_PINGPONG],
 			sde_encoder_phys_cmd_pp_tx_done_irq,
 			"pp_tx_done");
-
 	if (ret)
 		return;
 
 	ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
 	if (ret) {
-		sde_encoder_phys_cmd_unregister_pp_irq(phys_enc,
-				cmd_enc->pp_tx_done_irq_idx);
+		sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				cmd_enc->irq_idx[INTR_IDX_PINGPONG]);
 		return;
 	}
 
+	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+			SDE_IRQ_TYPE_INTF_UNDER_RUN,
+			&cmd_enc->irq_idx[INTR_IDX_UNDERRUN],
+			sde_encoder_phys_cmd_underrun_irq,
+			"underrun");
+	if (ret) {
+		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				cmd_enc->irq_idx[INTR_IDX_UNDERRUN]);
+		return;
+	}
+
+	ctl = phys_enc->hw_ctl;
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
 	phys_enc->enable_state = SDE_ENC_ENABLED;
 
-	DBG("Update pending flush CTL_ID %d flush_mask %x, INTF %d",
-			ctl->idx, flush_mask, cmd_enc->intf_idx);
+	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
+			ctl->idx - CTL_0, flush_mask);
 }
 
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
@@ -388,18 +502,30 @@
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
 
-	DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", cmd_enc->hw_pp->idx - PINGPONG_0);
 
 	if (WARN_ON(phys_enc->enable_state == SDE_ENC_DISABLED))
 		return;
 
-	sde_encoder_phys_cmd_unregister_pp_irq(phys_enc,
-			cmd_enc->pp_tx_done_irq_idx);
+	sde_encoder_phys_cmd_unregister_irq(phys_enc,
+			cmd_enc->irq_idx[INTR_IDX_UNDERRUN]);
 	sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+	sde_encoder_phys_cmd_unregister_irq(phys_enc,
+			cmd_enc->irq_idx[INTR_IDX_PINGPONG]);
 
 	atomic_set(&cmd_enc->pending_cnt, 0);
 	wake_up_all(&cmd_enc->pp_tx_done_wq);
 	phys_enc->enable_state = SDE_ENC_DISABLED;
+
+	if (atomic_read(&phys_enc->vblank_refcount))
+		SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
+				phys_enc->parent->base.id,
+				phys_enc->split_role,
+				atomic_read(&phys_enc->vblank_refcount));
 }
 
 static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
@@ -407,6 +533,10 @@
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
 
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
 	kfree(cmd_enc);
 }
 
@@ -418,7 +548,11 @@
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
 
-	DBG("intf %d", cmd_enc->intf_idx);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "\n");
 	hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
 }
 
@@ -442,7 +576,11 @@
 			to_sde_encoder_phys_cmd(phys_enc);
 	int new_pending_cnt;
 
-	DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", cmd_enc->hw_pp->idx - PINGPONG_0);
 
 	/*
 	 * Mark kickoff request as outstanding. If there are more than one,
@@ -452,18 +590,13 @@
 	*need_to_wait = new_pending_cnt != 1;
 
 	if (*need_to_wait)
-		SDE_DEBUG("intf %d pp %d needs to wait, new_pending_cnt %d",
-				cmd_enc->intf_idx, cmd_enc->hw_pp->idx,
+		SDE_DEBUG_CMDENC(cmd_enc,
+				"pp %d needs to wait, new_pending_cnt %d\n",
+				cmd_enc->hw_pp->idx - PINGPONG_0,
 				new_pending_cnt);
 	MSM_EVT(DEV(phys_enc), cmd_enc->hw_pp->idx, new_pending_cnt);
 }
 
-static bool sde_encoder_phys_cmd_needs_ctl_start(
-		struct sde_encoder_phys *phys_enc)
-{
-	return true;
-}
-
 static void sde_encoder_phys_cmd_init_ops(
 		struct sde_encoder_phys_ops *ops)
 {
@@ -477,7 +610,8 @@
 	ops->control_vblank_irq = sde_encoder_phys_cmd_control_vblank_irq;
 	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
 	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
-	ops->needs_ctl_start = sde_encoder_phys_cmd_needs_ctl_start;
+	ops->trigger_start = sde_encoder_helper_trigger_start;
+	ops->needs_split_flush = sde_encoder_phys_cmd_needs_split_flush;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
@@ -488,11 +622,12 @@
 	struct sde_hw_mdp *hw_mdp;
 	int ret = 0;
 
-	DBG("intf %d", p->intf_idx);
+	SDE_DEBUG("intf %d\n", p->intf_idx - INTF_0);
 
 	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
 	if (!cmd_enc) {
 		ret = -ENOMEM;
+		SDE_ERROR("failed to allocate\n");
 		goto fail;
 	}
 	phys_enc = &cmd_enc->base;
@@ -506,6 +641,7 @@
 	phys_enc->hw_mdptop = hw_mdp;
 
 	cmd_enc->intf_idx = p->intf_idx;
+	phys_enc->intf_idx = p->intf_idx;
 
 	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
 	phys_enc->parent = p->parent;
@@ -514,14 +650,14 @@
 	phys_enc->split_role = p->split_role;
 	phys_enc->intf_mode = INTF_MODE_CMD;
 	spin_lock_init(&phys_enc->spin_lock);
-	phys_enc->mode_3d = BLEND_3D_NONE;
 	cmd_enc->stream_sel = 0;
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 	atomic_set(&cmd_enc->pending_cnt, 0);
+	atomic_set(&phys_enc->vblank_refcount, 0);
 
 	init_waitqueue_head(&cmd_enc->pp_tx_done_wq);
 
-	DBG("Created sde_encoder_phys_cmd for intf %d", cmd_enc->intf_idx);
+	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
 
 	return phys_enc;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 0432b04..17a2cb52 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/jiffies.h>
 
 #include "sde_encoder_phys.h"
@@ -17,6 +18,18 @@
 #include "sde_core_irq.h"
 #include "sde_formats.h"
 
+#define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_VIDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
 #define VBLANK_TIMEOUT msecs_to_jiffies(100)
 
 #define to_sde_encoder_phys_vid(x) \
@@ -42,11 +55,15 @@
 {
 	int rc = 0;
 
-	DBG("intf %d", vid_enc->hw_intf->idx);
+	if (!vid_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
 	rc = wait_for_completion_timeout(&vid_enc->vblank_completion,
 			VBLANK_TIMEOUT);
 	if (rc == 0)
-		DRM_ERROR("timed out waiting for vblank irq\n");
+		SDE_ERROR_VIDENC(vid_enc, "timed out waiting for vblank irq\n");
 }
 
 static void drm_mode_to_intf_timing_params(
@@ -143,26 +160,29 @@
 	u32 actual_vfp_lines = 0;
 
 	/* Fetch must be outside active lines, otherwise undefined. */
-
 	if (start_of_frame_lines >= worst_case_needed_lines) {
-		DBG("Programmable fetch is not needed due to large vbp+vsw");
+		SDE_DEBUG_VIDENC(vid_enc,
+				"prog fetch is not needed, large vbp+vsw\n");
 		actual_vfp_lines = 0;
 	} else if (timing->v_front_porch < needed_vfp_lines) {
 		/* Warn fetch needed, but not enough porch in panel config */
 		pr_warn_once
 			("low vbp+vfp may lead to perf issues in some cases\n");
-		DBG("Less vfp than fetch requires, using entire vfp");
+		SDE_DEBUG_VIDENC(vid_enc,
+				"less vfp than fetch req, using entire vfp\n");
 		actual_vfp_lines = timing->v_front_porch;
 	} else {
-		DBG("Room in vfp for needed prefetch");
+		SDE_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
 		actual_vfp_lines = needed_vfp_lines;
 	}
 
-	DBG("v_front_porch %u v_back_porch %u vsync_pulse_width %u",
-	    timing->v_front_porch, timing->v_back_porch,
-	    timing->vsync_pulse_width);
-	DBG("wc_lines %u needed_vfp_lines %u actual_vfp_lines %u",
-	    worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+	SDE_DEBUG_VIDENC(vid_enc,
+		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+		timing->v_front_porch, timing->v_back_porch,
+		timing->vsync_pulse_width);
+	SDE_DEBUG_VIDENC(vid_enc,
+		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
 
 	return actual_vfp_lines;
 }
@@ -202,8 +222,9 @@
 		f.fetch_start = vfp_fetch_start_vsync_counter;
 	}
 
-	DBG("vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u",
-	    vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+	SDE_DEBUG_VIDENC(vid_enc,
+		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+		vfp_fetch_lines, vfp_fetch_start_vsync_counter);
 
 	spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
 	vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
@@ -215,7 +236,8 @@
 		const struct drm_display_mode *mode,
 		struct drm_display_mode *adj_mode)
 {
-	DBG("");
+	if (phys_enc)
+		SDE_DEBUG_VIDENC(to_sde_encoder_phys_vid(phys_enc), "\n");
 
 	/*
 	 * Modifying mode has consequences when the mode comes back to us
@@ -235,13 +257,14 @@
 	unsigned long lock_flags;
 	struct sde_hw_intf_cfg intf_cfg = { 0 };
 
-	if (WARN_ON(!vid_enc->hw_intf->ops.setup_timing_gen))
+	if (!phys_enc ||
+			!vid_enc->hw_intf->ops.setup_timing_gen ||
+			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		SDE_ERROR("invalid encoder %d\n", phys_enc != 0);
 		return;
+	}
 
-	if (WARN_ON(!phys_enc->hw_ctl->ops.setup_intf_cfg))
-		return;
-
-	DBG("intf %d, enabling mode:", vid_enc->hw_intf->idx);
+	SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
 	drm_mode_debug_printmodeline(&mode);
 
 	if (phys_enc->split_role != ENC_ROLE_SOLO) {
@@ -250,22 +273,22 @@
 		mode.hsync_start >>= 1;
 		mode.hsync_end >>= 1;
 
-		DBG("split_role %d, halve horizontal: %d %d %d %d",
-				phys_enc->split_role,
-				mode.hdisplay, mode.htotal,
-				mode.hsync_start, mode.hsync_end);
+		SDE_DEBUG_VIDENC(vid_enc,
+			"split_role %d, halve horizontal %d %d %d %d\n",
+			phys_enc->split_role,
+			mode.hdisplay, mode.htotal,
+			mode.hsync_start, mode.hsync_end);
 	}
 
 	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
 
 	fmt = sde_get_sde_format(fmt_fourcc);
-	DBG("fmt_fourcc %d", fmt_fourcc);
+	SDE_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
 
 	intf_cfg.intf = vid_enc->hw_intf->idx;
-	intf_cfg.wb = SDE_NONE;
-	intf_cfg.mode_3d = phys_enc->mode_3d;
 	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_VID;
 	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
 
 	spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
 	vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
@@ -279,15 +302,41 @@
 static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys_vid *vid_enc = arg;
-	struct sde_encoder_phys *phys_enc = &vid_enc->base;
+	struct sde_encoder_phys *phys_enc;
 
-	phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
+	if (!vid_enc)
+		return;
+
+	phys_enc = &vid_enc->base;
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
 
 	/* signal VBLANK completion */
 	complete_all(&vid_enc->vblank_completion);
 }
 
-static void sde_encoder_phys_vid_split_config(
+static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_vid *vid_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!vid_enc)
+		return;
+
+	phys_enc = &vid_enc->base;
+	if (phys_enc->parent_ops.handle_underrun_virt)
+		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static bool sde_encoder_phys_vid_needs_split_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc && phys_enc->split_role != ENC_ROLE_SOLO;
+}
+
+static void _sde_encoder_phys_vid_split_config(
 		struct sde_encoder_phys *phys_enc, bool enable)
 {
 	struct sde_encoder_phys_vid *vid_enc =
@@ -295,12 +344,13 @@
 	struct sde_hw_mdp *hw_mdptop = phys_enc->hw_mdptop;
 	struct split_pipe_cfg cfg = { 0 };
 
-	DBG("enable %d", enable);
+	SDE_DEBUG_VIDENC(vid_enc, "enable %d\n", enable);
 
 	cfg.en = enable;
 	cfg.mode = INTF_MODE_VIDEO;
 	cfg.intf = vid_enc->hw_intf->idx;
-	cfg.split_flush_en = enable;
+	cfg.split_flush_en = enable &&
+		sde_encoder_phys_vid_needs_split_flush(phys_enc);
 
 	/* Configure split pipe control to handle master/slave triggering */
 	if (hw_mdptop && hw_mdptop->ops.setup_split_pipe) {
@@ -312,66 +362,76 @@
 	}
 }
 
-static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc)
+static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int *irq_idx,
+	void (*irq_func)(void *, int), const char *irq_name)
 {
 	struct sde_encoder_phys_vid *vid_enc =
 			to_sde_encoder_phys_vid(phys_enc);
 	struct sde_irq_callback irq_cb;
 	int ret = 0;
 
-	vid_enc->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms,
-			SDE_IRQ_TYPE_INTF_VSYNC, vid_enc->hw_intf->idx);
-	if (vid_enc->irq_idx < 0) {
-		DRM_ERROR(
-			"Failed to lookup IRQ index for INTF_VSYNC with intf=%d\n",
-			vid_enc->hw_intf->idx);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
 		return -EINVAL;
 	}
 
-	irq_cb.func = sde_encoder_phys_vid_vblank_irq;
+	*irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms, intr_type,
+			vid_enc->hw_intf->idx);
+	if (*irq_idx < 0) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"failed to lookup IRQ index for %s type:%d\n", irq_name,
+			intr_type);
+		return -EINVAL;
+	}
+
+	irq_cb.func = irq_func;
 	irq_cb.arg = vid_enc;
-	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
-			vid_enc->irq_idx, &irq_cb);
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms, *irq_idx,
+			&irq_cb);
 	if (ret) {
-		DRM_ERROR("failed to register IRQ callback INTF_VSYNC");
+		SDE_ERROR_VIDENC(vid_enc,
+			"failed to register IRQ callback for %s\n", irq_name);
 		return ret;
 	}
 
-	ret = sde_core_irq_enable(phys_enc->sde_kms, &vid_enc->irq_idx, 1);
+	ret = sde_core_irq_enable(phys_enc->sde_kms, irq_idx, true);
 	if (ret) {
-		DRM_ERROR(
-			"failed to enable IRQ for INTF_VSYNC, intf %d, irq_idx=%d",
-				vid_enc->hw_intf->idx,
-				vid_enc->irq_idx);
-		vid_enc->irq_idx = -EINVAL;
+		SDE_ERROR_VIDENC(vid_enc,
+			"enable IRQ for intr:%s failed, irq_idx %d\n",
+			irq_name, *irq_idx);
+		*irq_idx = -EINVAL;
 
-		/* Unregister callback on IRQ enable failure */
+		/* unregister callback on IRQ enable failure */
 		sde_core_irq_register_callback(phys_enc->sde_kms,
-				vid_enc->irq_idx, NULL);
+						*irq_idx, NULL);
 		return ret;
 	}
 
-	DBG("registered IRQ for intf %d, irq_idx=%d",
-			vid_enc->hw_intf->idx,
-			vid_enc->irq_idx);
+	SDE_DEBUG_VIDENC(vid_enc, "registered irq %s idx: %d\n",
+						irq_name, *irq_idx);
 
 	return ret;
 }
 
 static int sde_encoder_phys_vid_unregister_irq(
-		struct sde_encoder_phys *phys_enc)
+	struct sde_encoder_phys *phys_enc, int irq_idx)
 {
 	struct sde_encoder_phys_vid *vid_enc =
 			to_sde_encoder_phys_vid(phys_enc);
 
-	sde_core_irq_register_callback(phys_enc->sde_kms, vid_enc->irq_idx,
-			NULL);
-	sde_core_irq_disable(phys_enc->sde_kms, &vid_enc->irq_idx, 1);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		goto end;
+	}
 
-	DBG("unregister IRQ for intf %d, irq_idx=%d",
-			vid_enc->hw_intf->idx,
-			vid_enc->irq_idx);
+	sde_core_irq_disable(phys_enc->sde_kms, &irq_idx, 1);
 
+	sde_core_irq_register_callback(phys_enc->sde_kms, irq_idx, NULL);
+
+	SDE_DEBUG_VIDENC(vid_enc, "unregistered %d\n", irq_idx);
+
+end:
 	return 0;
 }
 
@@ -386,8 +446,13 @@
 	struct sde_rm_hw_iter iter;
 	int i, instance;
 
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
 	phys_enc->cached_mode = *adj_mode;
-	SDE_DEBUG("intf %d, caching mode:\n", vid_enc->hw_intf->idx);
+	SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
 	drm_mode_debug_printmodeline(adj_mode);
 
 	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
@@ -401,7 +466,8 @@
 	}
 
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
-		SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl));
+		SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
 		phys_enc->hw_ctl = NULL;
 		return;
 	}
@@ -411,21 +477,39 @@
 		struct sde_encoder_phys *phys_enc,
 		bool enable)
 {
+	struct sde_encoder_phys_vid *vid_enc =
+		to_sde_encoder_phys_vid(phys_enc);
 	int ret = 0;
 
-	DBG("enable %d", enable);
-
-	/* Slave encoders don't report vblank */
-	if (sde_encoder_phys_vid_is_master(phys_enc)) {
-		if (enable)
-			ret = sde_encoder_phys_vid_register_irq(phys_enc);
-		else
-			ret = sde_encoder_phys_vid_unregister_irq(phys_enc);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
 	}
 
+	/* Slave encoders don't report vblank */
+	if (!sde_encoder_phys_vid_is_master(phys_enc))
+		return 0;
+
+	SDE_DEBUG_VIDENC(vid_enc, "[%pS] enable=%d/%d\n",
+			__builtin_return_address(0),
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	MSM_EVTMSG(phys_enc->parent->dev, NULL, enable,
+			atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = sde_encoder_phys_vid_register_irq(phys_enc,
+			SDE_IRQ_TYPE_INTF_VSYNC,
+			&vid_enc->irq_idx[INTR_IDX_VSYNC],
+			sde_encoder_phys_vid_vblank_irq, "vsync_irq");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = sde_encoder_phys_vid_unregister_irq(phys_enc,
+			vid_enc->irq_idx[INTR_IDX_VSYNC]);
+
 	if (ret)
-		DRM_ERROR("control vblank irq error %d, enable %d\n", ret,
-				enable);
+		SDE_ERROR_VIDENC(vid_enc,
+				"control vblank irq error %d, enable %d\n",
+				ret, enable);
 
 	return ret;
 }
@@ -437,35 +521,53 @@
 	struct sde_hw_intf *intf = vid_enc->hw_intf;
 	struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
 	u32 flush_mask = 0;
+	int ret;
 
-	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid hw: intf %pK ctl %pK\n", vid_enc->hw_intf,
-				phys_enc->hw_ctl);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	} else if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
 		return;
 	}
 
-	DBG("intf %d", vid_enc->hw_intf->idx);
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
 
 	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
 		return;
 
 	if (phys_enc->split_role == ENC_ROLE_MASTER)
-		sde_encoder_phys_vid_split_config(phys_enc, true);
+		_sde_encoder_phys_vid_split_config(phys_enc, true);
 	else if (phys_enc->split_role == ENC_ROLE_SOLO)
-		sde_encoder_phys_vid_split_config(phys_enc, false);
+		_sde_encoder_phys_vid_split_config(phys_enc, false);
 
 	sde_encoder_phys_vid_setup_timing_engine(phys_enc);
-	sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+	ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+	if (ret)
+		goto end;
+
+	ret = sde_encoder_phys_vid_register_irq(phys_enc,
+		SDE_IRQ_TYPE_INTF_UNDER_RUN,
+		&vid_enc->irq_idx[INTR_IDX_UNDERRUN],
+		sde_encoder_phys_vid_underrun_irq, "underrun");
+	if (ret) {
+		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+		goto end;
+	}
 
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
 
-	DBG("Update pending flush CTL_ID %d flush_mask %x, INTF %d",
-		ctl->idx, flush_mask, intf->idx);
+	SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+		ctl->idx - CTL_0, flush_mask);
 
 	/* ctl_flush & timing engine enable will be triggered by framework */
 	if (phys_enc->enable_state == SDE_ENC_DISABLED)
 		phys_enc->enable_state = SDE_ENC_ENABLING;
+
+end:
+	return;
 }
 
 static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
@@ -474,19 +576,24 @@
 	struct sde_encoder_phys_vid *vid_enc =
 			to_sde_encoder_phys_vid(phys_enc);
 
-	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid hw: intf %pK ctl %pK\n", vid_enc->hw_intf,
-				phys_enc->hw_ctl);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	} else if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
 		return;
 	}
 
-	DBG("intf %d", vid_enc->hw_intf->idx);
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
 
 	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
 		return;
 
-	if (WARN_ON(phys_enc->enable_state == SDE_ENC_DISABLED))
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("already disabled\n");
 		return;
+	}
 
 	spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
 	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
@@ -506,6 +613,12 @@
 		sde_encoder_phys_vid_wait_for_vblank(vid_enc);
 		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
 	}
+
+	if (atomic_read(&phys_enc->vblank_refcount))
+		SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
+				phys_enc->parent->base.id,
+				phys_enc->split_role,
+				atomic_read(&phys_enc->vblank_refcount));
 }
 
 static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
@@ -513,7 +626,11 @@
 	struct sde_encoder_phys_vid *vid_enc =
 	    to_sde_encoder_phys_vid(phys_enc);
 
-	DBG("intf %d", vid_enc->hw_intf->idx);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
 	kfree(vid_enc);
 }
 
@@ -525,7 +642,12 @@
 	struct sde_encoder_phys_vid *vid_enc =
 		to_sde_encoder_phys_vid(phys_enc);
 
-	DBG("intf %d", vid_enc->hw_intf->idx);
+	if (!phys_enc || !hw_res || !vid_enc->hw_intf) {
+		SDE_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+				phys_enc != 0, hw_res != 0, conn_state != 0);
+		return;
+	}
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
 	hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
 }
 
@@ -539,15 +661,18 @@
 	if (!sde_encoder_phys_vid_is_master(phys_enc))
 		return 0;
 
-	if (phys_enc->enable_state != SDE_ENC_ENABLED)
+	if (phys_enc->enable_state != SDE_ENC_ENABLED) {
+		SDE_ERROR("encoder not enabled\n");
 		return -EWOULDBLOCK;
+	}
 
 	MSM_EVTMSG(DEV(phys_enc), "waiting", 0, 0);
 
 	ret = wait_for_completion_timeout(&vid_enc->vblank_completion,
 			msecs_to_jiffies(WAIT_TIMEOUT_MSEC));
 	if (!ret) {
-		DBG("wait %u msec timed out", WAIT_TIMEOUT_MSEC);
+		SDE_DEBUG_VIDENC(vid_enc, "wait %u ms timed out\n",
+				WAIT_TIMEOUT_MSEC);
 		MSM_EVTMSG(DEV(phys_enc), "wait_timeout", 0, 0);
 		return -ETIMEDOUT;
 	}
@@ -578,7 +703,11 @@
 	struct sde_encoder_phys_vid *vid_enc =
 			to_sde_encoder_phys_vid(phys_enc);
 
-	DBG("enable_state %d", phys_enc->enable_state);
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
 
 	/*
 	 * Video mode must flush CTL before enabling timing engine
@@ -593,12 +722,6 @@
 	}
 }
 
-static bool sde_encoder_phys_vid_needs_ctl_start(
-		struct sde_encoder_phys *phys_enc)
-{
-	return false;
-}
-
 static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
 {
 	ops->is_master = sde_encoder_phys_vid_is_master;
@@ -612,7 +735,7 @@
 	ops->wait_for_commit_done = sde_encoder_phys_vid_wait_for_commit_done;
 	ops->prepare_for_kickoff = sde_encoder_phys_vid_prepare_for_kickoff;
 	ops->handle_post_kickoff = sde_encoder_phys_vid_handle_post_kickoff;
-	ops->needs_ctl_start = sde_encoder_phys_vid_needs_ctl_start;
+	ops->needs_split_flush = sde_encoder_phys_vid_needs_split_flush;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_vid_init(
@@ -624,14 +747,16 @@
 	struct sde_hw_mdp *hw_mdp;
 	int ret = 0;
 
-	DBG("intf %d", p->intf_idx);
+	if (!p) {
+		ret = -EINVAL;
+		goto fail;
+	}
 
 	vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
 	if (!vid_enc) {
 		ret = -ENOMEM;
 		goto fail;
 	}
-	vid_enc->irq_idx = -EINVAL;
 	init_completion(&vid_enc->vblank_completion);
 
 	phys_enc = &vid_enc->base;
@@ -643,6 +768,7 @@
 		goto fail;
 	}
 	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
 
 	/**
 	 * hw_intf resource permanently assigned to this encoder
@@ -660,10 +786,12 @@
 
 	if (!vid_enc->hw_intf) {
 		ret = -EINVAL;
-		DRM_ERROR("failed to get hw_intf\n");
+		SDE_ERROR("failed to get hw_intf\n");
 		goto fail;
 	}
 
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
 	sde_encoder_phys_vid_init_ops(&phys_enc->ops);
 	phys_enc->parent = p->parent;
 	phys_enc->parent_ops = p->parent_ops;
@@ -672,20 +800,15 @@
 	phys_enc->intf_mode = INTF_MODE_VIDEO;
 	spin_lock_init(&phys_enc->spin_lock);
 	init_completion(&vid_enc->vblank_completion);
-
-	DRM_INFO_ONCE("intf %d: 3d blend modes not yet supported\n",
-			vid_enc->hw_intf->idx);
-	phys_enc->mode_3d = BLEND_3D_NONE;
-
+	atomic_set(&phys_enc->vblank_refcount, 0);
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 
-
-	DBG("Created sde_encoder_phys_vid for intf %d", vid_enc->hw_intf->idx);
+	SDE_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
 
 	return phys_enc;
 
 fail:
-	DRM_ERROR("Failed to create encoder\n");
+	SDE_ERROR("failed to create encoder\n");
 	if (vid_enc)
 		sde_encoder_phys_vid_destroy(phys_enc);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index ca7a711..665b044 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
 #include "sde_hw_interrupts.h"
 #include "sde_core_irq.h"
 #include "sde_wb.h"
+#include "sde_vbif.h"
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
 #define WAIT_TIMEOUT_MSEC			84
@@ -217,13 +218,25 @@
 		SDE_ERROR("failed to get format %x\n", format->pixel_format);
 		return;
 	}
+	wb_cfg->roi = *wb_roi;
 
-	ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+	if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
+		ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+		if (ret) {
+			SDE_DEBUG("failed to populate layout %d\n", ret);
+			return;
+		}
+		wb_cfg->dest.width = fb->width;
+		wb_cfg->dest.height = fb->height;
+		wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+	} else {
+		ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
 			&wb_cfg->dest);
-	if (ret) {
-		/* this error should be detected during atomic_check */
-		SDE_DEBUG("failed to populate layout %d\n", ret);
-		return;
+		if (ret) {
+			/* this error should be detected during atomic_check */
+			SDE_DEBUG("failed to populate layout %d\n", ret);
+			return;
+		}
 	}
 
 	if ((wb_cfg->dest.format->fetch_planes == SDE_PLANE_PLANAR) &&
@@ -241,6 +254,9 @@
 			wb_cfg->dest.plane_pitch[2],
 			wb_cfg->dest.plane_pitch[3]);
 
+	if (hw_wb->ops.setup_roi)
+		hw_wb->ops.setup_roi(hw_wb, wb_cfg);
+
 	if (hw_wb->ops.setup_outformat)
 		hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
 
@@ -262,6 +278,7 @@
 
 	intf_cfg->intf = SDE_NONE;
 	intf_cfg->wb = hw_wb->idx;
+	intf_cfg->mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
 
 	if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg)
 		phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl,
@@ -526,7 +543,8 @@
 
 	complete_all(&wb_enc->wbdone_complete);
 
-	phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
+	phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+			phys_enc);
 }
 
 /**
@@ -766,15 +784,25 @@
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct drm_device *dev;
 	struct drm_connector *connector;
 
 	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
 
+	if (!wb_enc->base.parent || !wb_enc->base.parent->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	}
+	dev = wb_enc->base.parent->dev;
+
 	/* find associated writeback connector */
+	mutex_lock(&dev->mode_config.mutex);
 	drm_for_each_connector(connector, phys_enc->parent->dev) {
 		if (connector->encoder == phys_enc->parent)
 			break;
 	}
+	mutex_unlock(&dev->mode_config.mutex);
+
 	if (!connector || connector->encoder != phys_enc->parent) {
 		SDE_ERROR("failed to find writeback connector\n");
 		return;
@@ -857,17 +885,6 @@
 			hw_res->needs_cdm);
 }
 
-/**
- * sde_encoder_phys_wb_needs_ctl_start - Whether encoder needs ctl_start
- * @phys_enc:	Pointer to physical encoder
- * @Return:	Whether encoder needs ctl_start
- */
-static bool sde_encoder_phys_wb_needs_ctl_start(
-		struct sde_encoder_phys *phys_enc)
-{
-	return true;
-}
-
 #ifdef CONFIG_DEBUG_FS
 /**
  * sde_encoder_phys_wb_init_debugfs - initialize writeback encoder debugfs
@@ -969,7 +986,7 @@
 	ops->wait_for_commit_done = sde_encoder_phys_wb_wait_for_commit_done;
 	ops->prepare_for_kickoff = sde_encoder_phys_wb_prepare_for_kickoff;
 	ops->handle_post_kickoff = sde_encoder_phys_wb_handle_post_kickoff;
-	ops->needs_ctl_start = sde_encoder_phys_wb_needs_ctl_start;
+	ops->trigger_start = sde_encoder_helper_trigger_start;
 }
 
 /**
@@ -1051,6 +1068,7 @@
 	phys_enc->sde_kms = p->sde_kms;
 	phys_enc->split_role = p->split_role;
 	phys_enc->intf_mode = INTF_MODE_WB_LINE;
+	phys_enc->intf_idx = p->intf_idx;
 	spin_lock_init(&phys_enc->spin_lock);
 
 	ret = sde_encoder_phys_wb_init_debugfs(phys_enc, p->sde_kms);
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 0ddfd42..23e5614 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -35,6 +35,22 @@
 	return sync_fence_wait(fence, timeout_ms);
 }
 
+uint32_t sde_sync_get_name_prefix(void *fence)
+{
+	char *name;
+	uint32_t i, prefix;
+
+	if (!fence)
+		return 0x0;
+
+	name = ((struct sync_fence *)fence)->name;
+	prefix = 0x0;
+	for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
+		prefix = (prefix << CHAR_BIT) | name[i];
+
+	return prefix;
+}
+
 #if IS_ENABLED(CONFIG_SW_SYNC)
 /**
  * _sde_fence_create_fd - create fence object and return an fd for it
@@ -197,10 +213,6 @@
 	else
 		SDE_ERROR("detected extra signal attempt!\n");
 
-	MSM_EVTMSG(fence->dev,
-			SDE_FENCE_TIMELINE_NAME(fence),
-			fence->done_count,
-			is_error);
 	/*
 	 * Always advance 'done' counter,
 	 * but only advance timeline if !error
@@ -216,6 +228,11 @@
 		else
 			sw_sync_timeline_inc(fence->timeline, (int)val);
 	}
+	MSM_EVTMSG(fence->dev,
+			SDE_FENCE_TIMELINE_NAME(fence),
+			fence->done_count,
+			((struct sw_sync_timeline *)
+				fence->timeline)->value);
 	mutex_unlock(&fence->fence_lock);
 }
 #endif
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index f0839cf..b5980b4 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -17,6 +17,10 @@
 #include <linux/errno.h>
 #include <linux/mutex.h>
 
+#ifndef CHAR_BIT
+#define CHAR_BIT 8 /* define this if limits.h not available */
+#endif
+
 #ifdef CONFIG_SYNC
 /**
  * sde_sync_get - Query sync fence object from a file handle
@@ -48,6 +52,15 @@
  * Return: Zero on success, or -ETIME on timeout
  */
 int sde_sync_wait(void *fence, long timeout_ms);
+
+/**
+ * sde_sync_get_name_prefix - get integer representation of fence name prefix
+ * @fence: Pointer to opaque fence structure
+ *
+ * Return: 32-bit value packing the first four characters of the fence name
+ *         in big-endian order
+ */
+uint32_t sde_sync_get_name_prefix(void *fence);
 #else
 static inline void *sde_sync_get(uint64_t fd)
 {
@@ -62,6 +75,11 @@
 {
 	return 0;
 }
+
+static inline uint32_t sde_sync_get_name_prefix(void *fence)
+{
+	return 0x0;
+}
 #endif
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 6d9c574..5895158 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -102,163 +102,163 @@
 static const struct sde_format sde_format_map[] = {
 	INTERLEAVED_RGB_FMT(ARGB8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ABGR8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBA8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRA8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		true, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRX8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		false, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XRGB8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		false, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBX8888,
 		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		false, 4, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGB888,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
 		false, 3, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGR888,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
 		false, 3, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGB565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGR565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ARGB1555,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ABGR1555,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBA5551,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRA5551,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XRGB1555,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XBGR1555,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBX5551,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRX5551,
 		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ARGB4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(ABGR4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBA4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRA4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		true, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XRGB4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(XBGR4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(RGBX4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGRX4444,
 		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
-		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
 		false, 2, 0,
 		SDE_FETCH_LINEAR, 1),
 
@@ -312,13 +312,13 @@
 
 	PLANAR_YUV_FMT(YUV420,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C2_R_Cr, C1_B_Cb, C0_G_Y,
+		C0_G_Y, C1_B_Cb, C2_R_Cr,
 		false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
 		SDE_FETCH_LINEAR, 3),
 
 	PLANAR_YUV_FMT(YVU420,
 		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
-		C1_B_Cb, C2_R_Cr, C0_G_Y,
+		C0_G_Y, C2_R_Cr, C1_B_Cb,
 		false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
 		SDE_FETCH_LINEAR, 3),
 };
@@ -675,7 +675,8 @@
 		struct drm_framebuffer *fb,
 		struct sde_hw_fmt_layout *layout)
 {
-	int ret;
+	uint32_t plane_addr[SDE_MAX_PLANES];
+	int i, ret;
 
 	if (!fb || !layout) {
 		DRM_ERROR("invalid arguments\n");
@@ -696,12 +697,19 @@
 	if (ret)
 		return ret;
 
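+	/* snapshot the current plane addresses to detect whether repopulation changes them */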
+	for (i = 0; i < SDE_MAX_PLANES; ++i)
+		plane_addr[i] = layout->plane_addr[i];
+
 	/* Populate the addresses given the fb */
 	if (SDE_FORMAT_IS_UBWC(layout->format))
 		ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
 	else
 		ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
 
+	/* check if anything changed */
+	if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+		ret = -EAGAIN;
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index db2a4d3..5dcdfbb 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -80,7 +80,8 @@
  * @fb:                framebuffer pointer
  * @fmtl:              format layout structure to populate
  *
- * Return: error code on failure, 0 on success
+ * Return: 0 if new addresses were populated, -EAGAIN if population succeeded
+ *         but the addresses are unchanged, or an error code on failure
  */
 int sde_format_populate_layout(
 		int mmu_id,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 97faec3..7c01596 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,28 +10,1421 @@
  * GNU General Public License for more details.
  */
 
-#include "sde_hw_catalog.h"
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
 
-struct sde_mdss_hw_cfg_handler cfg_table[] = {
-	{ .major = 1, .minor = 7, .cfg_init = sde_mdss_cfg_170_init},
-};
+#include "sde_hw_mdss.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_catalog_format.h"
+#include "sde_kms.h"
+
+/*************************************************************
+ * MACRO DEFINITION
+ *************************************************************/
 
 /**
- * sde_hw_catalog_init: Returns the catalog information for the
- * passed HW version
- * @major:  Major version of the MDSS HW
- * @minor: Minor version
- * @step: step version
+ * Maximum number of sub-blocks within a hardware block. For example, SSPP
+ * pipes can have QSEED, PCC, IGC, PA, CSC, etc. The current software design
+ * caps this at 12; increase it if any hardware block gains more sub-blocks.
  */
-struct sde_mdss_cfg *sde_hw_catalog_init(u32 major, u32 minor, u32 step)
+#define MAX_SDE_HW_BLK  12
+
+/* each entry will have register address and bit offset in that register */
+#define MAX_BIT_OFFSET 2
+
+/* default line width for sspp */
+#define DEFAULT_SDE_LINE_WIDTH 2048
+
+/* max mixer blend stages */
+#define DEFAULT_SDE_MIXER_BLENDSTAGES 7
+
+/* max bank bit for macro tile and ubwc format */
+#define DEFAULT_SDE_HIGHEST_BANK_BIT 15
+
+/* default hardware block size if dtsi entry is not present */
+#define DEFAULT_SDE_HW_BLOCK_LEN 0x100
+
+/* default rects for multi rect case */
+#define DEFAULT_SDE_SSPP_MAX_RECTS 1
+
+/* total number of intf - dp, dsi, hdmi */
+#define INTF_COUNT			3
+
+#define MAX_SSPP_UPSCALE		20
+#define MAX_SSPP_DOWNSCALE		4
+#define SSPP_UNITY_SCALE		1
+
+#define MAX_HORZ_DECIMATION		4
+#define MAX_VERT_DECIMATION		4
+
+#define MAX_SPLIT_DISPLAY_CTL		2
+#define MAX_PP_SPLIT_DISPLAY_CTL	1
+
+#define MDSS_BASE_OFFSET		0x0
+
+#define ROT_LM_OFFSET			3
+#define LINE_LM_OFFSET			5
+#define LINE_MODE_WB_OFFSET		2
+
+/* maximum XIN halt timeout in usec */
+#define VBIF_XIN_HALT_TIMEOUT		0x4000
+
+#define DEFAULT_CREQ_LUT_NRT		0x0
+#define DEFAULT_PIXEL_RAM_SIZE		(50 * 1024)
+
+/*************************************************************
+ *  DTSI PROPERTY INDEX
+ *************************************************************/
+enum {
+	HW_OFF,
+	HW_LEN,
+};
+
+enum sde_prop {
+	SDE_OFF,
+	SDE_LEN,
+	SSPP_LINEWIDTH,
+	MIXER_LINEWIDTH,
+	MIXER_BLEND,
+	WB_LINEWIDTH,
+	BANK_BIT,
+	QSEED_TYPE,
+	PANIC_PER_PIPE,
+	CDP,
+	SRC_SPLIT,
+};
+
+enum {
+	SSPP_OFF,
+	SSPP_SIZE,
+	SSPP_TYPE,
+	SSPP_XIN,
+	SSPP_CLK_CTRL,
+	SSPP_CLK_STATUS,
+	SSPP_DANGER,
+	SSPP_SAFE,
+	SSPP_MAX_RECTS,
+	SSPP_SCALE_SIZE,
+	SSPP_QSEED_OFF,
+	SSPP_CSC_OFF,
+};
+
+enum {
+	INTF_OFF,
+	INTF_LEN,
+	INTF_PREFETCH,
+	INTF_TYPE,
+};
+
+enum {
+	PP_OFF,
+	PP_LEN,
+	TE_OFF,
+	TE_LEN,
+	TE2_OFF,
+	TE2_LEN,
+	DSC_OFF,
+	DSC_LEN,
+};
+
+enum {
+	DSPP_OFF,
+	DSPP_SIZE,
+	DSPP_IGC,
+	DSPP_PCC,
+	DSPP_GC,
+	DSPP_PA,
+	DSPP_GAMUT,
+	DSPP_DITHER,
+	DSPP_HIST,
+	DSPP_AD,
+};
+
+enum {
+	MIXER_OFF,
+	MIXER_LEN,
+	MIXER_GC,
+};
+
+enum {
+	WB_OFF,
+	WB_LEN,
+	WB_ID,
+	WB_XIN_ID,
+	WB_CLK_CTRL,
+};
+
+enum {
+	VBIF_OFF,
+	VBIF_LEN,
+	VBIF_ID,
+	VBIF_DEFAULT_OT_RD_LIMIT,
+	VBIF_DEFAULT_OT_WR_LIMIT,
+	VBIF_DYNAMIC_OT_RD_LIMIT,
+	VBIF_DYNAMIC_OT_WR_LIMIT,
+};
+
+/*************************************************************
+ * dts property definition
+ *************************************************************/
+enum prop_type {
+	PROP_TYPE_BOOL,
+	PROP_TYPE_U32,
+	PROP_TYPE_U32_ARRAY,
+	PROP_TYPE_STRING,
+	PROP_TYPE_STRING_ARRAY,
+	PROP_TYPE_BIT_OFFSET_ARRAY,
+};
+
+struct sde_prop_type {
+	/* property index from the property enum, kept for readability */
+	u8 id;
+	/* property name as documented in the dtsi bindings */
+	char *prop_name;
+	/**
+	 * parsing fails when a property marked mandatory is not present
+	 */
+	u32  is_mandatory;
+	/* property type, one of "enum prop_type" */
+	enum prop_type type;
+};
+
+/*************************************************************
+ * dts property list
+ *************************************************************/
+static struct sde_prop_type sde_prop[] = {
+	{SDE_OFF, "qcom,sde-off", true, PROP_TYPE_U32},
+	{SDE_LEN, "qcom,sde-len", false, PROP_TYPE_U32},
+	{SSPP_LINEWIDTH, "qcom,sde-sspp-linewidth", false, PROP_TYPE_U32},
+	{MIXER_LINEWIDTH, "qcom,sde-mixer-linewidth", false, PROP_TYPE_U32},
+	{MIXER_BLEND, "qcom,sde-mixer-blendstages", false, PROP_TYPE_U32},
+	{WB_LINEWIDTH, "qcom,sde-wb-linewidth", false, PROP_TYPE_U32},
+	{BANK_BIT, "qcom,sde-highest-bank-bit", false, PROP_TYPE_U32},
+	{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
+	{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
+	{CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
+	{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
+};
+
+static struct sde_prop_type sspp_prop[] = {
+	{SSPP_OFF, "qcom,sde-sspp-off", true, PROP_TYPE_U32_ARRAY},
+	{SSPP_SIZE, "qcom,sde-sspp-src-size", false, PROP_TYPE_U32},
+	{SSPP_TYPE, "qcom,sde-sspp-type", true, PROP_TYPE_STRING_ARRAY},
+	{SSPP_XIN, "qcom,sde-sspp-xin-id", true, PROP_TYPE_U32_ARRAY},
+	{SSPP_CLK_CTRL, "qcom,sde-sspp-clk-ctrl", false,
+		PROP_TYPE_BIT_OFFSET_ARRAY},
+	{SSPP_CLK_STATUS, "qcom,sde-sspp-clk-status", false,
+		PROP_TYPE_BIT_OFFSET_ARRAY},
+	{SSPP_DANGER, "qcom,sde-sspp-danger-lut", false, PROP_TYPE_U32_ARRAY},
+	{SSPP_SAFE, "qcom,sde-sspp-safe-lut", false, PROP_TYPE_U32_ARRAY},
+	{SSPP_MAX_RECTS, "qcom,sde-sspp-max-rects", false, PROP_TYPE_U32_ARRAY},
+	{SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
+	{SSPP_QSEED_OFF, "qcom,sde-sspp-qseed-off", false, PROP_TYPE_U32},
+	{SSPP_CSC_OFF, "qcom,sde-sspp-csc-off", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type ctl_prop[] = {
+	{HW_OFF, "qcom,sde-ctl-off", true, PROP_TYPE_U32_ARRAY},
+	{HW_LEN, "qcom,sde-ctl-size", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type mixer_prop[] = {
+	{MIXER_OFF, "qcom,sde-mixer-off", true, PROP_TYPE_U32_ARRAY},
+	{MIXER_LEN, "qcom,sde-mixer-size", false, PROP_TYPE_U32},
+	{MIXER_GC, "qcom,sde-has-mixer-gc", false, PROP_TYPE_BOOL},
+};
+
+static struct sde_prop_type dspp_prop[] = {
+	{DSPP_OFF, "qcom,sde-dspp-off", true, PROP_TYPE_U32_ARRAY},
+	{DSPP_SIZE, "qcom,sde-dspp-size", false, PROP_TYPE_U32},
+	{DSPP_IGC, "qcom,sde-dspp-igc-off", false, PROP_TYPE_U32},
+	{DSPP_PCC, "qcom,sde-dspp-pcc-off", false, PROP_TYPE_U32},
+	{DSPP_GC, "qcom,sde-dspp-gc-off", false, PROP_TYPE_U32},
+	{DSPP_PA, "qcom,sde-dspp-pa-off", false, PROP_TYPE_U32},
+	{DSPP_GAMUT, "qcom,sde-dspp-gamut-off", false, PROP_TYPE_U32},
+	{DSPP_DITHER, "qcom,sde-dspp-dither-off", false, PROP_TYPE_U32},
+	{DSPP_HIST, "qcom,sde-dspp-hist-off", false, PROP_TYPE_U32},
+	{DSPP_AD, "qcom,sde-dspp-ad-off", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type pp_prop[] = {
+	{PP_OFF, "qcom,sde-pp-off", true, PROP_TYPE_U32_ARRAY},
+	{PP_LEN, "qcom,sde-pp-size", false, PROP_TYPE_U32},
+	{TE_OFF, "qcom,sde-te-off", false, PROP_TYPE_U32_ARRAY},
+	{TE_LEN, "qcom,sde-te-size", false, PROP_TYPE_U32},
+	{TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY},
+	{TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
+	{DSC_OFF, "qcom,sde-dsc-off", false, PROP_TYPE_U32_ARRAY},
+	{DSC_LEN, "qcom,sde-dsc-size", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type cdm_prop[] = {
+	{HW_OFF, "qcom,sde-cdm-off", false, PROP_TYPE_U32_ARRAY},
+	{HW_LEN, "qcom,sde-cdm-size", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type intf_prop[] = {
+	{INTF_OFF, "qcom,sde-intf-off", true, PROP_TYPE_U32_ARRAY},
+	{INTF_LEN, "qcom,sde-intf-size", false, PROP_TYPE_U32},
+	{INTF_PREFETCH, "qcom,sde-intf-max-prefetch-lines", false,
+						PROP_TYPE_U32_ARRAY},
+	{INTF_TYPE, "qcom,sde-intf-type", false, PROP_TYPE_STRING_ARRAY},
+};
+
+static struct sde_prop_type wb_prop[] = {
+	{WB_OFF, "qcom,sde-wb-off", true, PROP_TYPE_U32_ARRAY},
+	{WB_LEN, "qcom,sde-wb-size", false, PROP_TYPE_U32},
+	{WB_ID, "qcom,sde-wb-id", true, PROP_TYPE_U32_ARRAY},
+	{WB_XIN_ID, "qcom,sde-wb-xin-id", false, PROP_TYPE_U32_ARRAY},
+	{WB_CLK_CTRL, "qcom,sde-wb-clk-ctrl", false,
+		PROP_TYPE_BIT_OFFSET_ARRAY},
+};
+
+static struct sde_prop_type vbif_prop[] = {
+	{VBIF_OFF, "qcom,sde-vbif-off", true, PROP_TYPE_U32_ARRAY},
+	{VBIF_LEN, "qcom,sde-vbif-size", false, PROP_TYPE_U32},
+	{VBIF_ID, "qcom,sde-vbif-id", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_DEFAULT_OT_RD_LIMIT, "qcom,sde-vbif-default-ot-rd-limit", false,
+		PROP_TYPE_U32},
+	{VBIF_DEFAULT_OT_WR_LIMIT, "qcom,sde-vbif-default-ot-wr-limit", false,
+		PROP_TYPE_U32},
+	{VBIF_DYNAMIC_OT_RD_LIMIT, "qcom,sde-vbif-dynamic-ot-rd-limit", false,
+		PROP_TYPE_U32_ARRAY},
+	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
+		PROP_TYPE_U32_ARRAY},
+};
+
+/*************************************************************
+ * static API list
+ *************************************************************/
+static int _parse_dt_u32_handler(struct device_node *np,
+	char *prop_name, u32 *offsets, int len, bool mandatory)
+{
+	int rc = of_property_read_u32_array(np, prop_name, offsets, len);
+
+	if (rc && mandatory)
+		SDE_ERROR("failed to read mandatory prop: %s (u32 array, len:%d)\n",
+				prop_name, len);
+	else if (rc)
+		SDE_DEBUG("failed to read optional prop: %s (u32 array, len:%d)\n",
+				prop_name, len);
+
+	return rc;
+}
+
+static int _parse_dt_bit_offset(struct device_node *np,
+	char *prop_name, u32 prop_value[][MAX_BIT_OFFSET],
+	u32 count, bool mandatory)
+{
+	int rc = 0, len, i, j;
+	const u32 *arr;
+
+	arr = of_get_property(np, prop_name, &len);
+	if (arr) {
+		len /= sizeof(u32);
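+		/* entries come in <reg offset, bit offset> pairs, two u32s per block */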
+		for (i = 0, j = 0; i < len; j++) {
+			prop_value[j][0] = be32_to_cpu(arr[i]);
+			i++;
+			prop_value[j][1] = be32_to_cpu(arr[i]);
+			i++;
+		}
+	} else {
+		if (mandatory) {
+			SDE_ERROR("mandatory property '%s' not found\n",
+				prop_name);
+			rc = -EINVAL;
+		} else {
+			SDE_DEBUG("optional property '%s' not found\n",
+				prop_name);
+		}
+	}
+
+	return rc;
+}
+
+static int _validate_dt_entry(struct device_node *np,
+	struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
+	int *off_count)
+{
+	int rc = 0, i, val;
+
+	*off_count = of_property_count_u32_elems(np, sde_prop[0].prop_name);
+	if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) {
+		SDE_ERROR("invalid hw offset prop name:%s count:%d\n",
+			sde_prop[0].prop_name, *off_count);
+		*off_count = 0;
+		return sde_prop[0].is_mandatory ? -EINVAL : 0;
+	}
+
+	for (i = 0; i < prop_size && i < MAX_BLOCKS; i++) {
+		switch (sde_prop[i].type) {
+		case PROP_TYPE_U32:
+			rc = of_property_read_u32(np, sde_prop[i].prop_name,
+				&val);
+			break;
+		case PROP_TYPE_U32_ARRAY:
+			prop_count[i] = of_property_count_u32_elems(np,
+				sde_prop[i].prop_name);
+			break;
+		case PROP_TYPE_STRING_ARRAY:
+			prop_count[i] = of_property_count_strings(np,
+				sde_prop[i].prop_name);
+			break;
+		case PROP_TYPE_BIT_OFFSET_ARRAY:
+			of_get_property(np, sde_prop[i].prop_name, &val);
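+			/* of_get_property() returns the property length in bytes via val */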
+			prop_count[i] = val / (MAX_BIT_OFFSET * sizeof(u32));
+			break;
+		default:
+			SDE_DEBUG("invalid property type:%d\n",
+							sde_prop[i].type);
+			break;
+		}
+		SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
+			prop_count:%d\n", i, sde_prop[i].prop_name,
+			sde_prop[i].type, prop_count[i]);
+
+		if (rc && sde_prop[i].is_mandatory &&
+		   (sde_prop[i].type == PROP_TYPE_U32)) {
+			SDE_ERROR("prop:%s not present\n",
+						sde_prop[i].prop_name);
+			goto end;
+		} else if (sde_prop[i].type == PROP_TYPE_U32 ||
+			sde_prop[i].type == PROP_TYPE_BOOL) {
+			rc = 0;
+			continue;
+		}
+
+		if ((prop_count[i] != *off_count) && sde_prop[i].is_mandatory) {
+			SDE_ERROR("prop:%s count:%d is different compared to \"\
+				offset array:%d\n", sde_prop[i].prop_name,
+				prop_count[i], *off_count);
+			rc = -EINVAL;
+			goto end;
+		} else if (prop_count[i] != *off_count) {
+			SDE_DEBUG("prop:%s count:%d is different compared to \"\
+				offset array:%d\n", sde_prop[i].prop_name,
+				prop_count[i], *off_count);
+			rc = 0;
+			prop_count[i] = 0;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int _read_dt_entry(struct device_node *np,
+	struct sde_prop_type *sde_prop, u32 prop_size, u32 *prop_count,
+	u32 prop_value[][MAX_SDE_HW_BLK],
+	u32 bit_value[][MAX_SDE_HW_BLK][MAX_BIT_OFFSET])
+{
+	int rc = 0, i, j;
+
+	for (i = 0; i < prop_size && i < MAX_BLOCKS; i++) {
+		switch (sde_prop[i].type) {
+		case PROP_TYPE_U32:
+			of_property_read_u32(np, sde_prop[i].prop_name,
+				&prop_value[i][0]);
+			SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
+				 value:0x%x\n", i, sde_prop[i].prop_name,
+				sde_prop[i].type, prop_value[i][0]);
+			break;
+		case PROP_TYPE_BOOL:
+			prop_value[i][0] =  of_property_read_bool(np,
+				sde_prop[i].prop_name);
+			SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
+				value:0x%x\n", i, sde_prop[i].prop_name,
+				sde_prop[i].type, prop_value[i][0]);
+			break;
+		case PROP_TYPE_U32_ARRAY:
+			rc = _parse_dt_u32_handler(np, sde_prop[i].prop_name,
+				prop_value[i], prop_count[i],
+				sde_prop[i].is_mandatory);
+			if (rc && sde_prop[i].is_mandatory) {
+				SDE_ERROR("%s prop validation success but \"\
+					read failed\n", sde_prop[i].prop_name);
+				goto end;
+			} else {
+				/* only for debug purpose */
+				SDE_DEBUG("prop id:%d prop name:%s prop \"\
+					type:%d", i, sde_prop[i].prop_name,
+					sde_prop[i].type);
+				for (j = 0; j < prop_count[i]; j++)
+					SDE_DEBUG(" value[%d]:0x%x ", j,
+							prop_value[i][j]);
+				SDE_DEBUG("\n");
+			}
+			break;
+		case PROP_TYPE_BIT_OFFSET_ARRAY:
+			rc = _parse_dt_bit_offset(np, sde_prop[i].prop_name,
+				bit_value[i], prop_count[i],
+				sde_prop[i].is_mandatory);
+			if (rc && sde_prop[i].is_mandatory) {
+				SDE_ERROR("%s prop validation success but \"\
+					read failed\n", sde_prop[i].prop_name);
+				goto end;
+			} else {
+				SDE_DEBUG("prop id:%d prop name:%s prop \"\
+					type:%d", i, sde_prop[i].prop_name,
+					sde_prop[i].type);
+				for (j = 0; j < prop_count[i]; j++)
+					SDE_DEBUG(" count[%d]: bit:0x%x \"\
+					off:0x%x ", j, bit_value[i][j][0],
+					bit_value[i][j][1]);
+				SDE_DEBUG("\n");
+			}
+			break;
+		default:
+			SDE_DEBUG("invalid property type:%d\n",
+							sde_prop[i].type);
+			break;
+		}
+		rc = 0;
+	}
+
+end:
+	return rc;
+}
+
+static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	u32 prop_value[][MAX_SDE_HW_BLK], u32 *vig_count)
+{
+	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+		set_bit(SDE_SSPP_SCALER_QSEED2, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+		sblk->scaler_blk.base = prop_value[SSPP_QSEED_OFF][0];
+	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+		set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+		sblk->scaler_blk.base = prop_value[SSPP_QSEED_OFF][0];
+	}
+
+	set_bit(SDE_SSPP_CSC, &sspp->features);
+	sblk->csc_blk.base = prop_value[SSPP_CSC_OFF][0];
+	sblk->csc_blk.id = SDE_SSPP_CSC;
+
+	sblk->maxupscale = MAX_SSPP_UPSCALE;
+	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+	sspp->id = SSPP_VIG0 + *vig_count;
+	sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
+
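+	/* VIG pipes carry CSC/scaler blocks, so use the YUV-capable format list */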
+	sblk->format_list = plane_formats_yuv;
+	set_bit(SDE_SSPP_QOS, &sspp->features);
+	(*vig_count)++;
+}
+
+static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	u32 prop_value[][MAX_SDE_HW_BLK], u32 *rgb_count)
+{
+	if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+		sblk->scaler_blk.base = prop_value[SSPP_QSEED_OFF][0];
+	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+		sblk->scaler_blk.base = prop_value[SSPP_QSEED_OFF][0];
+	}
+
+	sblk->maxupscale = MAX_SSPP_UPSCALE;
+	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+	sspp->id = SSPP_RGB0 + *rgb_count;
+	sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
+	sblk->format_list = plane_formats;
+	set_bit(SDE_SSPP_QOS, &sspp->features);
+	(*rgb_count)++;
+}
+
+static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	u32 prop_value[][MAX_SDE_HW_BLK], u32 *cursor_count)
+{
+	set_bit(SDE_SSPP_CURSOR, &sspp->features);
+	sblk->maxupscale = SSPP_UNITY_SCALE;
+	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sspp->id = SSPP_CURSOR0 + *cursor_count;
+	sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
+	sblk->format_list = plane_formats;
+	(*cursor_count)++;
+}
+
+static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	u32 prop_value[][MAX_SDE_HW_BLK], u32 *dma_count)
+{
+	sblk->maxupscale = SSPP_UNITY_SCALE;
+	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sspp->id = SSPP_DMA0 + *dma_count;
+	sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
+	sblk->format_list = plane_formats;
+	set_bit(SDE_SSPP_QOS, &sspp->features);
+	(*dma_count)++;
+}
+
+static int sde_sspp_parse_dt(struct device_node *np,
+	struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], off_count, i, j;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK];
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET];
+	const char *type;
+	struct sde_sspp_cfg *sspp;
+	struct sde_sspp_sub_blks *sblk;
+	u32 vig_count = 0, dma_count = 0, rgb_count = 0, cursor_count = 0;
+	u32 danger_count = 0, safe_count = 0;
+
+	rc = _validate_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
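+	/*
+	 * danger/safe LUTs are shared across pipes (linear/tile/nrt entries),
+	 * so validate them against their own element count rather than the
+	 * sspp offset count
+	 */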
+	rc = _validate_dt_entry(np, &sspp_prop[SSPP_DANGER], 1,
+			&prop_count[SSPP_DANGER], &danger_count);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &sspp_prop[SSPP_SAFE], 1,
+			&prop_count[SSPP_SAFE], &safe_count);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop), prop_count,
+							prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	sde_cfg->sspp_count = off_count;
+
+	for (i = 0; i < off_count; i++) {
+		sspp = sde_cfg->sspp + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		sspp->sblk = sblk;
+
+		sspp->base = prop_value[SSPP_OFF][i];
+		sblk->maxlinewidth = sde_cfg->max_sspp_linewidth;
+
+		set_bit(SDE_SSPP_SRC, &sspp->features);
+		sblk->src_blk.id = SDE_SSPP_SRC;
+
+		of_property_read_string_index(np,
+				sspp_prop[SSPP_TYPE].prop_name, i, &type);
+		if (!strcmp(type, "vig")) {
+			_sde_sspp_setup_vig(sde_cfg, sspp, sblk, prop_value,
+								&vig_count);
+		} else if (!strcmp(type, "rgb")) {
+			_sde_sspp_setup_rgb(sde_cfg, sspp, sblk, prop_value,
+								&rgb_count);
+		} else if (!strcmp(type, "cursor")) {
+			_sde_sspp_setup_cursor(sde_cfg, sspp, sblk, prop_value,
+								&cursor_count);
+		} else if (!strcmp(type, "dma")) {
+			_sde_sspp_setup_dma(sde_cfg, sspp, sblk, prop_value,
+								&dma_count);
+		} else {
+			SDE_ERROR("invalid sspp type:%s\n", type);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
+		sblk->maxvdeciexp = MAX_VERT_DECIMATION;
+
+		sspp->xin_id = prop_value[SSPP_XIN][i];
+		sblk->danger_lut_linear = prop_value[SSPP_DANGER][0];
+		sblk->danger_lut_tile = prop_value[SSPP_DANGER][1];
+		sblk->danger_lut_nrt = prop_value[SSPP_DANGER][2];
+		sblk->safe_lut_linear = prop_value[SSPP_SAFE][0];
+		sblk->safe_lut_tile = prop_value[SSPP_SAFE][1];
+		sblk->safe_lut_nrt = prop_value[SSPP_SAFE][2];
+		sblk->creq_lut_nrt = DEFAULT_CREQ_LUT_NRT;
+		sblk->pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE;
+		sblk->src_blk.len = prop_value[SSPP_SIZE][0];
+
+		for (j = 0; j < sde_cfg->mdp_count; j++) {
+			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].reg_off =
+					bit_value[SSPP_CLK_CTRL][i][0];
+			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].bit_off =
+					bit_value[SSPP_CLK_CTRL][i][1];
+		}
+
+		SDE_DEBUG(
+			"xin:%d danger:%x/%x/%x safe:%x/%x/%x creq:%x ram:%d clk%d:%x/%d\n",
+			sspp->xin_id,
+			sblk->danger_lut_linear,
+			sblk->danger_lut_tile,
+			sblk->danger_lut_nrt,
+			sblk->safe_lut_linear,
+			sblk->safe_lut_tile,
+			sblk->safe_lut_nrt,
+			sblk->creq_lut_nrt,
+			sblk->pixel_ram_size,
+			sspp->clk_ctrl,
+			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].reg_off,
+			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].bit_off);
+	}
+
+end:
+	return rc;
+}
+
+static int sde_ctl_parse_dt(struct device_node *np,
+		struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { {0} };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+				= { { { 0 } } };
+	struct sde_ctl_cfg *ctl;
+	u32 off_count;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument input param\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->ctl_count = off_count;
+
+	rc = _read_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		ctl = sde_cfg->ctl + i;
+		ctl->base = prop_value[HW_OFF][i];
+		ctl->id = CTL_0 + i;
+
+		if (i < MAX_SPLIT_DISPLAY_CTL)
+			set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
+		if (i < MAX_PP_SPLIT_DISPLAY_CTL)
+			set_bit(SDE_CTL_PINGPONG_SPLIT, &ctl->features);
+	}
+
+end:
+	return rc;
+}
+
+static int sde_mixer_parse_dt(struct device_node *np,
+						struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+				= { { { 0 } } };
+
+	u32 off_count, max_blendstages;
+	u32 blend_reg_base[] = {0x20, 0x50, 0x80, 0xb0, 0x230, 0x260, 0x290};
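+	/* peer mixer usable for dual-LM topologies; zero means no pairing */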
+	u32 lm_pair_mask[] = {LM_1, LM_0, LM_5, 0x0, 0x0, LM_2};
+	struct sde_lm_cfg *mixer;
+	struct sde_lm_sub_blks *sblk;
+	int pp_count, dspp_count;
+	u32 pp_idx, dspp_idx;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument input param\n");
+		rc = -EINVAL;
+		goto end;
+	}
+	max_blendstages = sde_cfg->max_mixer_blendstages;
+
+	rc = _validate_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->mixer_count = off_count;
+
+	rc = _read_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	pp_count = sde_cfg->pingpong_count;
+	dspp_count = sde_cfg->dspp_count;
+
+	for (i = 0, pp_idx = 0, dspp_idx = 0; i < off_count; i++) {
+		mixer = sde_cfg->mixer + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		mixer->sblk = sblk;
+
+		mixer->base = prop_value[HW_OFF][i];
+		mixer->len = prop_value[HW_LEN][0];
+		mixer->id = LM_0 + i;
+		if (!mixer->len)
+			mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+		if (lm_pair_mask[i])
+			mixer->lm_pair_mask = 1 << lm_pair_mask[i];
+
+		sblk->maxblendstages = max_blendstages;
+		sblk->maxwidth = sde_cfg->max_mixer_width;
+		memcpy(sblk->blendstage_base, blend_reg_base, sizeof(u32) *
+			min_t(u32, MAX_BLOCKS, min_t(u32,
+			ARRAY_SIZE(blend_reg_base), max_blendstages)));
+		if (sde_cfg->has_src_split)
+			set_bit(SDE_MIXER_SOURCESPLIT, &mixer->features);
+		if (prop_value[MIXER_GC][0])
+			set_bit(SDE_MIXER_GC, &mixer->features);
+
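+		/*
+		 * mixers in the [ROT_LM_OFFSET, LINE_LM_OFFSET) range have no
+		 * pingpong or dspp attached
+		 */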
+		if ((i < ROT_LM_OFFSET) || (i >= LINE_LM_OFFSET)) {
+			mixer->pingpong = pp_count > 0 ? pp_idx + PINGPONG_0
+								: PINGPONG_MAX;
+			mixer->dspp = dspp_count > 0 ? dspp_idx + DSPP_0
+								: DSPP_MAX;
+			pp_count--;
+			dspp_count--;
+			pp_idx++;
+			dspp_idx++;
+		} else {
+			mixer->pingpong = PINGPONG_MAX;
+			mixer->dspp = DSPP_MAX;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int sde_intf_parse_dt(struct device_node *np,
+						struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+					= { { { 0 } } };
+	u32 off_count;
+	u32 dsi_count = 0, none_count = 0, hdmi_count = 0, dp_count = 0;
+	const char *type;
+	struct sde_intf_cfg *intf;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->intf_count = off_count;
+
+	rc = _read_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		intf = sde_cfg->intf + i;
+		intf->base = prop_value[INTF_OFF][i];
+		intf->len = prop_value[INTF_LEN][0];
+		intf->id = INTF_0 + i;
+		if (!intf->len)
+			intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+		intf->prog_fetch_lines_worst_case =
+					prop_value[INTF_PREFETCH][i];
+
+		of_property_read_string_index(np,
+				intf_prop[INTF_TYPE].prop_name, i, &type);
+		if (!strcmp(type, "dsi")) {
+			intf->type = INTF_DSI;
+			intf->controller_id = dsi_count;
+			dsi_count++;
+		} else if (!strcmp(type, "hdmi")) {
+			intf->type = INTF_HDMI;
+			intf->controller_id = hdmi_count;
+			hdmi_count++;
+		} else if (!strcmp(type, "dp")) {
+			intf->type = INTF_DP;
+			intf->controller_id = dp_count;
+			dp_count++;
+		} else {
+			intf->type = INTF_NONE;
+			intf->controller_id = none_count;
+			none_count++;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i, j;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+					= { { { 0 } } };
+	u32 off_count;
+	struct sde_wb_cfg *wb;
+	struct sde_wb_sub_blocks *sblk;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->wb_count = off_count;
+
+	rc = _read_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		wb = sde_cfg->wb + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		wb->sblk = sblk;
+
+		wb->base = prop_value[WB_OFF][i];
+		wb->id = WB_0 + prop_value[WB_ID][i];
+		wb->clk_ctrl = SDE_CLK_CTRL_WB0 + prop_value[WB_ID][i];
+		wb->xin_id = prop_value[WB_XIN_ID][i];
+		wb->vbif_idx = VBIF_NRT;
+		wb->len = prop_value[WB_LEN][0];
+		wb->format_list = wb2_formats;
+		if (!wb->len)
+			wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
+		sblk->maxlinewidth = sde_cfg->max_wb_linewidth;
+
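+		/*
+		 * writeback ids at or above LINE_MODE_WB_OFFSET operate in
+		 * line mode; lower ids are block mode
+		 */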
+		if (wb->id >= LINE_MODE_WB_OFFSET)
+			set_bit(SDE_WB_LINE_MODE, &wb->features);
+		else
+			set_bit(SDE_WB_BLOCK_MODE, &wb->features);
+		set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
+		set_bit(SDE_WB_YUV_CONFIG, &wb->features);
+
+		for (j = 0; j < sde_cfg->mdp_count; j++) {
+			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
+					bit_value[WB_CLK_CTRL][i][0];
+			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].bit_off =
+					bit_value[WB_CLK_CTRL][i][1];
+		}
+
+		SDE_DEBUG(
+			"wb:%d xin:%d vbif:%d clk%d:%x/%d\n",
+			wb->id - WB_0,
+			wb->xin_id,
+			wb->vbif_idx,
+			wb->clk_ctrl,
+			sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].reg_off,
+			sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].bit_off);
+	}
+
+end:
+	return rc;
+}
+
+static int sde_dspp_parse_dt(struct device_node *np,
+						struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+				= { { { 0 } } };
+	u32 off_count;
+	struct sde_dspp_cfg *dspp;
+	struct sde_dspp_sub_blks *sblk;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->dspp_count = off_count;
+
+	rc = _read_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		dspp = sde_cfg->dspp + i;
+		dspp->base = prop_value[DSPP_OFF][i];
+		dspp->id = DSPP_0 + i;
+
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		dspp->sblk = sblk;
+
+		sblk->igc.base = prop_value[DSPP_IGC][0];
+		if (sblk->igc.base)
+			set_bit(SDE_DSPP_IGC, &dspp->features);
+
+		sblk->pcc.base = prop_value[DSPP_PCC][0];
+		if (sblk->pcc.base)
+			set_bit(SDE_DSPP_PCC, &dspp->features);
+
+		sblk->gc.base = prop_value[DSPP_GC][0];
+		if (sblk->gc.base)
+			set_bit(SDE_DSPP_GC, &dspp->features);
+
+		sblk->gamut.base = prop_value[DSPP_GAMUT][0];
+		if (sblk->gamut.base)
+			set_bit(SDE_DSPP_GAMUT, &dspp->features);
+
+		sblk->dither.base = prop_value[DSPP_DITHER][0];
+		if (sblk->dither.base)
+			set_bit(SDE_DSPP_DITHER, &dspp->features);
+
+		sblk->hist.base = prop_value[DSPP_HIST][0];
+		if (sblk->hist.base)
+			set_bit(SDE_DSPP_HIST, &dspp->features);
+
+		sblk->ad.base = prop_value[DSPP_AD][i];
+		if (sblk->ad.base)
+			set_bit(SDE_DSPP_AD, &dspp->features);
+	}
+
+end:
+	return rc;
+}
+
+static int sde_cdm_parse_dt(struct device_node *np,
+				struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+				= { { { 0 } } };
+	u32 off_count;
+	struct sde_cdm_cfg *cdm;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->cdm_count = off_count;
+
+	rc = _read_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		cdm = sde_cfg->cdm + i;
+		cdm->base = prop_value[HW_OFF][i];
+		cdm->id = CDM_0 + i;
+		cdm->len = prop_value[HW_LEN][0];
+
+		/* intf3 and wb2 for cdm block */
+		cdm->wb_connect = sde_cfg->wb_count ? BIT(WB_2) : BIT(31);
+		cdm->intf_connect = sde_cfg->intf_count ? BIT(INTF_3) : BIT(31);
+	}
+
+end:
+	return rc;
+}
+
+static int sde_vbif_parse_dt(struct device_node *np,
+				struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i, j, k;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+				= { { { 0 } } };
+	u32 off_count, vbif_len, rd_len = 0, wr_len = 0;
+	struct sde_vbif_cfg *vbif;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop),
+			prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_RD_LIMIT], 1,
+			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], &rd_len);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_WR_LIMIT], 1,
+			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], &wr_len);
+	if (rc)
+		goto end;
+
+	sde_cfg->vbif_count = off_count;
+
+	rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	vbif_len = prop_value[VBIF_LEN][0];
+	if (!vbif_len)
+		vbif_len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+	for (i = 0; i < off_count; i++) {
+		vbif = sde_cfg->vbif + i;
+		vbif->base = prop_value[VBIF_OFF][i];
+		vbif->len = vbif_len;
+		vbif->id = VBIF_0 + prop_value[VBIF_ID][i];
+
+		SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0);
+
+		vbif->xin_halt_timeout = VBIF_XIN_HALT_TIMEOUT;
+
+		vbif->default_ot_rd_limit =
+				prop_value[VBIF_DEFAULT_OT_RD_LIMIT][0];
+		SDE_DEBUG("default_ot_rd_limit=%u\n",
+				vbif->default_ot_rd_limit);
+
+		vbif->default_ot_wr_limit =
+				prop_value[VBIF_DEFAULT_OT_WR_LIMIT][0];
+		SDE_DEBUG("default_ot_wr_limit=%u\n",
+				vbif->default_ot_wr_limit);
+
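+		/*
+		 * dynamic OT entries are <pixels-per-second, ot-limit> pairs,
+		 * hence count / 2
+		 */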
+		vbif->dynamic_ot_rd_tbl.count =
+				prop_count[VBIF_DYNAMIC_OT_RD_LIMIT] / 2;
+		SDE_DEBUG("dynamic_ot_rd_tbl.count=%u\n",
+				vbif->dynamic_ot_rd_tbl.count);
+		if (vbif->dynamic_ot_rd_tbl.count) {
+			vbif->dynamic_ot_rd_tbl.cfg = kcalloc(
+				vbif->dynamic_ot_rd_tbl.count,
+				sizeof(struct sde_vbif_dynamic_ot_cfg),
+				GFP_KERNEL);
+			if (!vbif->dynamic_ot_rd_tbl.cfg) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		}
+
+		for (j = 0, k = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			vbif->dynamic_ot_rd_tbl.cfg[j].pps = (u64)
+				prop_value[VBIF_DYNAMIC_OT_RD_LIMIT][k++];
+			vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit =
+				prop_value[VBIF_DYNAMIC_OT_RD_LIMIT][k++];
+			SDE_DEBUG("dynamic_ot_rd_tbl[%d].cfg=<%llu %u>\n", j,
+				vbif->dynamic_ot_rd_tbl.cfg[j].pps,
+				vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit);
+		}
+
+		vbif->dynamic_ot_wr_tbl.count =
+				prop_count[VBIF_DYNAMIC_OT_WR_LIMIT] / 2;
+		SDE_DEBUG("dynamic_ot_wr_tbl.count=%u\n",
+				vbif->dynamic_ot_wr_tbl.count);
+		if (vbif->dynamic_ot_wr_tbl.count) {
+			vbif->dynamic_ot_wr_tbl.cfg = kcalloc(
+				vbif->dynamic_ot_wr_tbl.count,
+				sizeof(struct sde_vbif_dynamic_ot_cfg),
+				GFP_KERNEL);
+			if (!vbif->dynamic_ot_wr_tbl.cfg) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		}
+
+		for (j = 0, k = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			vbif->dynamic_ot_wr_tbl.cfg[j].pps = (u64)
+				prop_value[VBIF_DYNAMIC_OT_WR_LIMIT][k++];
+			vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit =
+				prop_value[VBIF_DYNAMIC_OT_WR_LIMIT][k++];
+			SDE_DEBUG("dynamic_ot_wr_tbl[%d].cfg=<%llu %u>\n", j,
+				vbif->dynamic_ot_wr_tbl.cfg[j].pps,
+				vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit);
+		}
+
+		if (vbif->default_ot_rd_limit || vbif->default_ot_wr_limit ||
+				vbif->dynamic_ot_rd_tbl.count ||
+				vbif->dynamic_ot_wr_tbl.count)
+			set_bit(SDE_VBIF_QOS_OTLIM, &vbif->features);
+	}
+
+end:
+	return rc;
+}
+
+static int sde_pp_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i;
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { { 0 } };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+				= { { { 0 } } };
+	u32 off_count;
+	struct sde_pingpong_cfg *pp;
+	struct sde_pingpong_sub_blks *sblk;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->pingpong_count = off_count;
+
+	rc = _read_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		pp = sde_cfg->pingpong + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		pp->sblk = sblk;
+
+		pp->base = prop_value[PP_OFF][i];
+		pp->id = PINGPONG_0 + i;
+		pp->len = prop_value[PP_LEN][0];
+
+		sblk->te.base = prop_value[TE_OFF][i];
+		sblk->te.id = SDE_PINGPONG_TE;
+		set_bit(SDE_PINGPONG_TE, &pp->features);
+
+		sblk->te2.base = prop_value[TE2_OFF][i];
+		if (sblk->te2.base) {
+			sblk->te2.id = SDE_PINGPONG_TE2;
+			set_bit(SDE_PINGPONG_TE2, &pp->features);
+			set_bit(SDE_PINGPONG_SPLIT, &pp->features);
+		}
+
+		sblk->dsc.base = prop_value[DSC_OFF][i];
+		if (sblk->dsc.base) {
+			sblk->dsc.id = SDE_PINGPONG_DSC;
+			set_bit(SDE_PINGPONG_DSC, &pp->features);
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
+{
+	int rc, len, prop_count[MAX_BLOCKS];
+	u32 prop_value[MAX_BLOCKS][MAX_SDE_HW_BLK] = { {0} };
+	u32 bit_value[MAX_BLOCKS][MAX_SDE_HW_BLK][MAX_BIT_OFFSET]
+			= { { { 0 } } };
+	const char *type;
+
+	if (!cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
+		&len);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
+		prop_value, bit_value);
+	if (rc)
+		goto end;
+
+	cfg->mdss_count = 1;
+	cfg->mdss[0].base = MDSS_BASE_OFFSET;
+	cfg->mdss[0].id = MDP_TOP;
+
+	cfg->mdp_count = 1;
+	cfg->mdp[0].id = MDP_TOP;
+	cfg->mdp[0].base = prop_value[SDE_OFF][0];
+	cfg->mdp[0].len = prop_value[SDE_LEN][0];
+	if (!cfg->mdp[0].len)
+		cfg->mdp[0].len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+	cfg->max_sspp_linewidth = prop_value[SSPP_LINEWIDTH][0];
+	if (!cfg->max_sspp_linewidth)
+		cfg->max_sspp_linewidth = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->max_mixer_width = prop_value[MIXER_LINEWIDTH][0];
+	if (!cfg->max_mixer_width)
+		cfg->max_mixer_width = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->max_mixer_blendstages = prop_value[MIXER_BLEND][0];
+	if (!cfg->max_mixer_blendstages)
+		cfg->max_mixer_blendstages = DEFAULT_SDE_MIXER_BLENDSTAGES;
+
+	cfg->max_wb_linewidth = prop_value[WB_LINEWIDTH][0];
+	if (!cfg->max_wb_linewidth)
+		cfg->max_wb_linewidth = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->mdp[0].highest_bank_bit = prop_value[BANK_BIT][0];
+	if (!cfg->mdp[0].highest_bank_bit)
+		cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
+
+	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
+	if (!rc && !strcmp(type, "qseedv3"))
+		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
+	else if (!rc && !strcmp(type, "qseedv2"))
+		cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
+
+	cfg->has_src_split = prop_value[SRC_SPLIT][0];
+end:
+	return rc;
+}
+
+static void sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
+{
+	switch (hw_rev) {
+	case SDE_HW_VER_170:
+	case SDE_HW_VER_171:
+	case SDE_HW_VER_172:
+		/* update msm8996 target here */
+		break;
+	case SDE_HW_VER_300:
+	case SDE_HW_VER_400:
+		/* update msm8998 and skunk target here */
+		break;
+	}
+}
+
+static void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(cfg_table); i++) {
-		if ((cfg_table[i].major == major) &&
-		(cfg_table[i].minor == minor))
-			return cfg_table[i].cfg_init(step);
-	}
+	if (!sde_cfg)
+		return;
 
-	return ERR_PTR(-ENODEV);
+	for (i = 0; i < sde_cfg->sspp_count; i++)
+		kfree(sde_cfg->sspp[i].sblk);
+
+	for (i = 0; i < sde_cfg->mixer_count; i++)
+		kfree(sde_cfg->mixer[i].sblk);
+
+	for (i = 0; i < sde_cfg->wb_count; i++)
+		kfree(sde_cfg->wb[i].sblk);
+
+	for (i = 0; i < sde_cfg->dspp_count; i++)
+		kfree(sde_cfg->dspp[i].sblk);
+
+	for (i = 0; i < sde_cfg->pingpong_count; i++)
+		kfree(sde_cfg->pingpong[i].sblk);
+
+	for (i = 0; i < sde_cfg->vbif_count; i++) {
+		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
+		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
+	}
+}
+
+/*************************************************************
+ * hardware catalog init
+ *************************************************************/
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
+{
+	int rc;
+	struct sde_mdss_cfg *sde_cfg;
+	struct device_node *np = dev->dev->of_node;
+
+	sde_cfg = kzalloc(sizeof(*sde_cfg), GFP_KERNEL);
+	if (!sde_cfg)
+		return ERR_PTR(-ENOMEM);
+
+	sde_cfg->hwversion = hw_rev;
+
+	rc = sde_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_ctl_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_sspp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_dspp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_pp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	/* mixer parsing should be done after dspp and pp for mapping setup */
+	rc = sde_mixer_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_intf_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_wb_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	/* cdm parsing should be done after intf and wb for mapping setup */
+	rc = sde_cdm_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_vbif_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	sde_hardware_caps(sde_cfg, hw_rev);
+
+	return sde_cfg;
+
+end:
+	sde_hw_catalog_deinit(sde_cfg);
+	kfree(sde_cfg);
+	return NULL;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 00c7a29..911fbe2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,16 +17,21 @@
 #include <linux/bug.h>
 #include <linux/bitmap.h>
 #include <linux/err.h>
+#include <drm/drmP.h>
 
-#define MAX_BLOCKS    8
-#define MAX_LAYERS    12
+/**
+ * Maximum hardware block count: for example, up to 12 SSPP pipes or
+ * 5 CTL paths. The current design allows at most 12 instances of any
+ * hardware block type.
+ */
+#define MAX_BLOCKS    12
 
 #define SDE_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28)    |\
 		((MINOR & 0xFFF) << 16)  |\
 		(STEP & 0xFFFF))
 
 #define SDE_HW_MAJOR(rev)		((rev) >> 28)
-#define SDE_HW_MINOR(rev)		.(((rev) >> 16) & 0xFFF)
+#define SDE_HW_MINOR(rev)		(((rev) >> 16) & 0xFFF)
 #define SDE_HW_STEP(rev)		((rev) & 0xFFFF)
 #define SDE_HW_MAJOR_MINOR(rev)		((rev) >> 16)
 
@@ -37,12 +42,20 @@
 #define SDE_HW_VER_171	SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
 #define SDE_HW_VER_172	SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
 #define SDE_HW_VER_300	SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* msmskunk v1.0 */
+
+#define IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
 
 #define MAX_IMG_WIDTH 0x3fff
 #define MAX_IMG_HEIGHT 0x3fff
 
 #define CRTC_DUAL_MIXERS	2
 
+#define SDE_COLOR_PROCESS_VER(MAJOR, MINOR) \
+		((((MAJOR) & 0xFFFF) << 16) | (((MINOR) & 0xFFFF)))
+#define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
+#define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
+
 /**
  * MDP TOP BLOCK features
  * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe
@@ -72,8 +85,8 @@
  * @SDE_SSPP_SCALER_QSEED3,  QSEED3 alogorithm support
  * @SDE_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
  * @SDE_SSPP_CSC,            Support of Color space converion
- * @SDE_SSPP_PA_V1,          Common op-mode register for PA blocks
- * @SDE_SSPP_HIST_V1         Histogram programming method V1
+ * @SDE_SSPP_HSIC,           Global HSIC control
+ * @SDE_SSPP_MEMCOLOR        Memory Color Support
  * @SDE_SSPP_IGC,            Inverse gamma correction
  * @SDE_SSPP_PCC,            Color correction support
  * @SDE_SSPP_CURSOR,         SSPP can be used as a cursor layer
@@ -86,8 +99,8 @@
 	SDE_SSPP_SCALER_QSEED3,
 	SDE_SSPP_SCALER_RGB,
 	SDE_SSPP_CSC,
-	SDE_SSPP_PA_V1, /* Common op-mode register for PA blocks */
-	SDE_SSPP_HIST_V1,
+	SDE_SSPP_HSIC,
+	SDE_SSPP_MEMCOLOR,
 	SDE_SSPP_IGC,
 	SDE_SSPP_PCC,
 	SDE_SSPP_CURSOR,
@@ -114,20 +127,28 @@
  * @SDE_DSPP_IGC             DSPP Inverse gamma correction block
  * @SDE_DSPP_PCC             Panel color correction block
  * @SDE_DSPP_GC              Gamma correction block
- * @SDE_DSPP_PA              Picture adjustment block
+ * @SDE_DSPP_HSIC            Global HSIC block
+ * @SDE_DSPP_MEMCOLOR        Memory Color block
+ * @SDE_DSPP_SIXZONE         Six zone block
  * @SDE_DSPP_GAMUT           Gamut bloc
  * @SDE_DSPP_DITHER          Dither block
- * @SDE_DSPP_HIST            Histogram bloc
+ * @SDE_DSPP_HIST            Histogram block
+ * @SDE_DSPP_VLUT            PA VLUT block
+ * @SDE_DSPP_AD              AD block
  * @SDE_DSPP_MAX             maximum value
  */
 enum {
 	SDE_DSPP_IGC = 0x1,
 	SDE_DSPP_PCC,
 	SDE_DSPP_GC,
-	SDE_DSPP_PA,
+	SDE_DSPP_HSIC,
+	SDE_DSPP_MEMCOLOR,
+	SDE_DSPP_SIXZONE,
 	SDE_DSPP_GAMUT,
 	SDE_DSPP_DITHER,
 	SDE_DSPP_HIST,
+	SDE_DSPP_VLUT,
+	SDE_DSPP_AD,
 	SDE_DSPP_MAX
 };
 
@@ -172,8 +193,11 @@
  * @SDE_WB_TRAFFIC_SHAPER,  Writeback traffic shaper bloc
  * @SDE_WB_UBWC_1_0,        Writeback Universal bandwidth compression 1.0
  *                          support
- * @SDE_WB_WBWC_1_5         UBWC 1.5 support
+ * @SDE_WB_UBWC_1_5         UBWC 1.5 support
  * @SDE_WB_YUV_CONFIG       Writeback supports output of YUV colorspace
+ * @SDE_WB_PIPE_ALPHA       Writeback supports pipe alpha
+ * @SDE_WB_XY_ROI_OFFSET    Writeback supports x/y-offset of the output ROI
+ *                          within the destination image
  * @SDE_WB_MAX              maximum value
  */
 enum {
@@ -187,6 +211,8 @@
 	SDE_WB_TRAFFIC_SHAPER,
 	SDE_WB_UBWC_1_0,
 	SDE_WB_YUV_CONFIG,
+	SDE_WB_PIPE_ALPHA,
+	SDE_WB_XY_ROI_OFFSET,
 	SDE_WB_MAX
 };
 
@@ -204,12 +230,14 @@
  * MACRO SDE_HW_BLK_INFO - information of HW blocks inside SDE
  * @id:                enum identifying this block
  * @base:              register base offset to mdss
+ * @len:               length of hardware block
  * @features           bit mask identifying sub-blocks/features
  */
 #define SDE_HW_BLK_INFO \
 	u32 id; \
 	u32 base; \
-	unsigned long features
+	u32 len; \
+	unsigned long features; \
 
 /**
  * MACRO SDE_HW_SUBBLK_INFO - information of HW sub-block inside SDE
@@ -282,9 +310,10 @@
  * @src_blk:
  * @scaler_blk:
  * @csc_blk:
- * @pa_blk:
- * @hist_lut:
+ * @hsic:
+ * @memcolor:
  * @pcc_blk:
+ * @igc_blk:
  * @format_list: Pointer to list of supported formats
  */
 struct sde_sspp_sub_blks {
@@ -306,9 +335,10 @@
 	struct sde_src_blk src_blk;
 	struct sde_scaler_blk scaler_blk;
 	struct sde_pp_blk csc_blk;
-	struct sde_pp_blk pa_blk;
-	struct sde_pp_blk hist_lut;
+	struct sde_pp_blk hsic;
+	struct sde_pp_blk memcolor;
 	struct sde_pp_blk pcc_blk;
+	struct sde_pp_blk igc_blk;
 
 	const struct sde_format_extended *format_list;
 };
@@ -318,21 +348,27 @@
  * @maxwidth:               Max pixel width supported by this mixer
  * @maxblendstages:         Max number of blend-stages supported
  * @blendstage_base:        Blend-stage register base offset
+ * @gc: gamma correction block
  */
 struct sde_lm_sub_blks {
 	u32 maxwidth;
 	u32 maxblendstages;
 	u32 blendstage_base[MAX_BLOCKS];
+	struct sde_pp_blk gc;
 };
 
 struct sde_dspp_sub_blks {
 	struct sde_pp_blk igc;
 	struct sde_pp_blk pcc;
 	struct sde_pp_blk gc;
-	struct sde_pp_blk pa;
+	struct sde_pp_blk hsic;
+	struct sde_pp_blk memcolor;
+	struct sde_pp_blk sixzone;
 	struct sde_pp_blk gamut;
 	struct sde_pp_blk dither;
 	struct sde_pp_blk hist;
+	struct sde_pp_blk ad;
+	struct sde_pp_blk vlut;
 };
 
 struct sde_pingpong_sub_blks {
@@ -514,16 +550,6 @@
 };
 
 /**
- * struct sde_ad_cfg - information of Assertive Display blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- */
-struct sde_ad_cfg {
-	SDE_HW_BLK_INFO;
-};
-
-/**
  * struct sde_vbif_dynamic_ot_cfg - dynamic OT setting
  * @pps                pixel per seconds
  * @ot_limit           OT limit to use up to specified pixel per second
@@ -541,7 +567,7 @@
  */
 struct sde_vbif_dynamic_ot_tbl {
 	u32 count;
-	const struct sde_vbif_dynamic_ot_cfg *cfg;
+	struct sde_vbif_dynamic_ot_cfg *cfg;
 };
 
 /**
@@ -569,10 +595,29 @@
  * This is the main catalog data structure representing
  * this HW version. Contains number of instances,
  * register offsets, capabilities of the all MDSS HW sub-blocks.
+ *
+ * @max_sspp_linewidth maximum supported source pipe line width.
+ * @max_mixer_width    maximum supported layer mixer line width.
+ * @max_mixer_blendstages maximum layer mixer blend stages, i.e. supported
+ *                       z-order levels
+ * @max_wb_linewidth   maximum supported writeback line width.
+ * @highest_bank_bit   highest memory bank bit setting for tile buffers.
+ * @qseed_type         QSEED2 or QSEED3 scaler support.
+ * @has_src_split      source split feature status
+ * @has_cdp            client driven prefetch (CDP) feature status
  */
 struct sde_mdss_cfg {
 	u32 hwversion;
 
+	u32 max_sspp_linewidth;
+	u32 max_mixer_width;
+	u32 max_mixer_blendstages;
+	u32 max_wb_linewidth;
+	u32 highest_bank_bit;
+	u32 qseed_type;
+	bool has_src_split;
+	bool has_cdp;
+
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
 
@@ -583,7 +628,7 @@
 	struct sde_ctl_cfg ctl[MAX_BLOCKS];
 
 	u32 sspp_count;
-	struct sde_sspp_cfg sspp[MAX_LAYERS];
+	struct sde_sspp_cfg sspp[MAX_BLOCKS];
 
 	u32 mixer_count;
 	struct sde_lm_cfg mixer[MAX_BLOCKS];
@@ -603,9 +648,6 @@
 	u32 wb_count;
 	struct sde_wb_cfg wb[MAX_BLOCKS];
 
-	u32 ad_count;
-	struct sde_ad_cfg ad[MAX_BLOCKS];
-
 	u32 vbif_count;
 	struct sde_vbif_cfg vbif[MAX_BLOCKS];
 	/* Add additional block data structures here */
@@ -634,7 +676,14 @@
 #define BLK_WB(s) ((s)->wb)
 #define BLK_AD(s) ((s)->ad)
 
-struct sde_mdss_cfg *sde_mdss_cfg_170_init(u32 step);
-struct sde_mdss_cfg *sde_hw_catalog_init(u32 major, u32 minor, u32 step);
+/**
+ * sde_hw_catalog_init() - sde hardware catalog init API; parses dtsi
+ * properties and stores the parsed offsets and hardware capabilities in the
+ * config structure.
+ * @dev:          drm device node.
+ * @hw_rev:       hardware revision; the caller needs to provide it before
+ *                parsing.
+ *
+ * Return: parsed sde config structure
+ */
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);
 
 #endif /* _SDE_HW_CATALOG_H */
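
For context, here is a minimal caller sketch of the new catalog init entry point. The wrapper name example_parse_catalog is hypothetical (not part of the patch); the IS_ERR_OR_NULL handling and the max_mixer_width-based mode_config sizing mirror the sde_kms.c hunk later in this patch, and the usual driver headers are assumed to be in scope.

/* Hypothetical caller sketch; not part of the patch. */
static int example_parse_catalog(struct drm_device *dev, u32 core_rev,
		struct sde_mdss_cfg **catalog)
{
	struct sde_mdss_cfg *cfg;

	/* the caller must know the hardware revision before parsing */
	cfg = sde_hw_catalog_init(dev, core_rev);
	if (IS_ERR_OR_NULL(cfg))
		return cfg ? PTR_ERR(cfg) : -EINVAL;

	/* parsed limits such as max_mixer_width are now available */
	dev->mode_config.max_width = cfg->max_mixer_width * 2;

	*catalog = cfg;
	return 0;
}
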
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
index a397d67..1cc63ff 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,9 +17,9 @@
 /* VIG layer capability */
 #define VIG_17X_MASK \
 	(BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_SCALER_QSEED2) |\
-	BIT(SDE_SSPP_CSC) | BIT(SDE_SSPP_PA_V1) |\
-	BIT(SDE_SSPP_HIST_V1) | BIT(SDE_SSPP_PCC) |\
-	BIT(SDE_SSPP_IGC) | BIT(SDE_SSPP_QOS))
+	BIT(SDE_SSPP_CSC) | BIT(SDE_SSPP_HSIC) |\
+	BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC) |\
+	BIT(SDE_SSPP_MEMCOLOR) | BIT(SDE_SSPP_QOS))
 
 /* RGB layer capability */
 #define RGB_17X_MASK \
@@ -28,8 +28,8 @@
 
 /* DMA layer capability */
 #define DMA_17X_MASK \
-	(BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_PA_V1) |\
-	BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC) | BIT(SDE_SSPP_QOS))
+	(BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC) |\
+	BIT(SDE_SSPP_QOS))
 
 /* Cursor layer capability */
 #define CURSOR_17X_MASK  (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_CURSOR))
@@ -39,8 +39,9 @@
 
 #define DSPP_17X_MASK \
 	(BIT(SDE_DSPP_IGC) | BIT(SDE_DSPP_PCC) |\
-	BIT(SDE_DSPP_GC) | BIT(SDE_DSPP_PA) | BIT(SDE_DSPP_GAMUT) |\
-	BIT(SDE_DSPP_DITHER) | BIT(SDE_DSPP_HIST))
+	BIT(SDE_DSPP_GC) | BIT(SDE_DSPP_HSIC) | BIT(SDE_DSPP_GAMUT) |\
+	BIT(SDE_DSPP_DITHER) | BIT(SDE_DSPP_HIST) | BIT(SDE_DSPP_MEMCOLOR) |\
+	BIT(SDE_DSPP_SIXZONE) | BIT(SDE_DSPP_AD) | BIT(SDE_DSPP_VLUT))
 
 #define PINGPONG_17X_MASK \
 	(BIT(SDE_PINGPONG_TE) | BIT(SDE_PINGPONG_DSC))
@@ -254,13 +255,15 @@
 			.base = 0x200, .len = 0x70,},
 		.csc_blk = {.id = SDE_SSPP_CSC,
 			.base = 0x320, .len = 0x44,},
-		.pa_blk = {.id = SDE_SSPP_PA_V1,
-			.base = 0x200, .len = 0x0,},
-		.hist_lut = {.id = SDE_SSPP_HIST_V1,
-			.base = 0xA00, .len = 0x400,},
-		.pcc_blk = {.id = SDE_SSPP_PCC,
-			.base = 0x1780, .len = 0x64,},
 		.format_list = plane_formats_yuv,
+		.igc_blk = {.id = SDE_SSPP_IGC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.hsic = {.id = SDE_SSPP_HSIC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.memcolor = {.id = SDE_SSPP_MEMCOLOR, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
 	};
 
 	static const struct sde_sspp_sub_blks layer = {
@@ -284,13 +287,11 @@
 			.base = 0x200, .len = 0x70,},
 		.csc_blk = {.id = SDE_SSPP_CSC,
 			.base = 0x320, .len = 0x44,},
-		.pa_blk = {.id = SDE_SSPP_PA_V1,
-			.base = 0x200, .len = 0x0,},
-		.hist_lut = {.id = SDE_SSPP_HIST_V1,
-			.base = 0xA00, .len = 0x400,},
-		.pcc_blk = {.id = SDE_SSPP_PCC,
-			.base = 0x1780, .len = 0x64,},
 		.format_list = plane_formats,
+		.igc_blk = {.id = SDE_SSPP_IGC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
 	};
 
 	static const struct sde_sspp_sub_blks dma = {
@@ -311,23 +312,21 @@
 		.src_blk = {.id = SDE_SSPP_SRC, .base = 0x00, .len = 0x150,},
 		.scaler_blk = {.id = 0, .base = 0x00, .len = 0x0,},
 		.csc_blk = {.id = 0, .base = 0x00, .len = 0x0,},
-		.pa_blk = {.id = 0, .base = 0x200, .len = 0x0,},
-		.hist_lut = {.id = 0, .base = 0xA00, .len = 0x0,},
-		.pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x01780, .len = 0x64,},
 		.format_list = plane_formats,
+		.igc_blk = {.id = SDE_SSPP_IGC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
 	};
 
 	static const struct sde_sspp_sub_blks cursor = {
 		.maxlinewidth = 128,
 		.maxdwnscale = 1, .maxupscale = 1,
-		.maxhdeciexp = DECIMATION_17X_MAX_H,
-		.maxvdeciexp = DECIMATION_17X_MAX_V,
+		.maxhdeciexp = 0,
+		.maxvdeciexp = 0,
 		.src_blk = {.id = SDE_SSPP_SRC, .base = 0x00, .len = 0x150,},
 		.scaler_blk = {.id = 0, .base = 0x00, .len = 0x0,},
 		.csc_blk = {.id = 0, .base = 0x00, .len = 0x0,},
-		.pa_blk = {.id = 0, .base = 0x00, .len = 0x0,},
-		.hist_lut = {.id = 0, .base = 0x00, .len = 0x0,},
-		.pcc_blk = {.id = 0, .base = 0x00, .len = 0x0,},
 		.format_list = plane_formats,
 	};
 
@@ -336,23 +335,35 @@
 		.maxwidth = 2560,
 		.maxblendstages = 7, /* excluding base layer */
 		.blendstage_base = { /* offsets relative to mixer base */
-			0x20, 0x50, 0x80, 0xB0, 0x230, 0x260, 0x290 }
+			0x20, 0x50, 0x80, 0xB0, 0x230, 0x260, 0x290 },
+		.gc = {.id = SDE_DSPP_GC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
 	};
 
 	/* DSPP capability */
 	static const struct sde_dspp_sub_blks dspp = {
-			.igc = {.id = SDE_DSPP_GC, .base = 0x17c0, .len = 0x0,
-				.version = 0x1},
-		.pcc = {.id = SDE_DSPP_PCC, .base = 0x00, .len = 0x0,
-			.version = 0x1},
-		.gamut = {.id = SDE_DSPP_GAMUT, .base = 0x01600, .len = 0x0,
-			.version = 0x1},
-		.dither = {.id = SDE_DSPP_DITHER, .base = 0x00, .len = 0x0,
-			.version = 0x1},
-		.pa = {.id = SDE_DSPP_PA, .base = 0x00, .len = 0x0,
-			.version = 0x1},
+		.igc = {.id = SDE_DSPP_IGC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.pcc = {.id = SDE_DSPP_PCC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.gamut = {.id = SDE_DSPP_GAMUT, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.dither = {.id = SDE_DSPP_DITHER, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.hsic = {.id = SDE_DSPP_HSIC, .base = 0x00, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.memcolor = {.id = SDE_DSPP_MEMCOLOR, .base = 0x00, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.sixzone = {.id = SDE_DSPP_SIXZONE, .base = 0x00, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
 		.hist = {.id = SDE_DSPP_HIST, .base = 0x00, .len = 0x0,
-			.version = 0x1},
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.gc = {.id = SDE_DSPP_GC, .base = 0x0, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
+		.ad = {.id = SDE_DSPP_AD, .base = 0x00, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x3, 0x0)},
+		.vlut = {.id = SDE_DSPP_VLUT, .base = 0x1400, .len = 0x0,
+			.version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
 	};
 
 	/* PINGPONG capability */
@@ -600,11 +611,6 @@
 				.xin_id = 6,
 				.clk_ctrl = SDE_CLK_CTRL_WB2},
 		},
-		.ad_count = 2,
-		.ad = {
-			{.id = AD_0, .base = 0x00079000},
-			{.id = AD_1, .base = 0x00079800},
-		},
 		.vbif_count = 2,
 		.vbif = {
 			{.id = VBIF_0,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
new file mode 100644
index 0000000..240c81c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
@@ -0,0 +1,129 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+
+static const struct sde_format_extended plane_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{0, 0},
+};
+
+static const struct sde_format_extended plane_formats_yuv[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV21, 0},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_NV61, 0},
+	{DRM_FORMAT_VYUY, 0},
+	{DRM_FORMAT_UYVY, 0},
+	{DRM_FORMAT_YUYV, 0},
+	{DRM_FORMAT_YVYU, 0},
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_YVU420, 0},
+	{0, 0},
+};
+
+static const struct sde_format_extended wb2_formats[] = {
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_YUYV, 0},
+
+	{0, 0},
+};
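
The format tables above are zero-terminated: the trailing {0, 0} entry acts as a sentinel, so consumers can walk each list without a separate length field. A small illustrative helper follows; the field name fourcc_format is assumed from sde_hw_mdss.h, which is not part of this hunk.

/* Illustrative only: count entries in a zero-terminated format list. */
static u32 example_format_count(const struct sde_format_extended *list)
{
	u32 count = 0;

	while (list && list[count].fourcc_format)
		count++;

	return count;
}
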
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index ea0567a..2f1bac7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -21,7 +21,6 @@
 #define   CTL_TOP                       0x014
 #define   CTL_FLUSH                     0x018
 #define   CTL_START                     0x01C
-#define   CTL_PACK_3D                   0x020
 #define   CTL_SW_RESET                  0x030
 #define   CTL_LAYER_EXTN_OFFSET         0x40
 
@@ -78,6 +77,14 @@
 	ctx->pending_flush_mask |= flushbits;
 }
 
+static u32 sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0x0;
+
+	return ctx->pending_flush_mask;
+}
+
 static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
 {
 	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
@@ -377,6 +384,7 @@
 {
 	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
 	ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
+	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
 	ops->trigger_flush = sde_hw_ctl_trigger_flush;
 	ops->trigger_start = sde_hw_ctl_trigger_start;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 46f9a24..2f9ff5b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -72,6 +72,13 @@
 	void (*clear_pending_flush)(struct sde_hw_ctl *ctx);
 
 	/**
+	 * Query the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	u32 (*get_pending_flush)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * OR in the given flushbits to the cached pending_flush_mask
 	 * No effect on hardware
 	 * @ctx       : ctl path ctx pointer
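
Below is a usage sketch of the ctl flush ops, including the new get_pending_flush readback. The helper name example_ctl_flush is hypothetical; the op ordering follows the existing pattern of accumulating bits, kicking CTL_FLUSH, then clearing the cached mask.

/* Hypothetical helper showing the pending-flush bookkeeping. */
static void example_ctl_flush(struct sde_hw_ctl *ctl, u32 flushbits)
{
	u32 pending;

	ctl->ops.update_pending_flush(ctl, flushbits);

	/* read back the cached mask only; this does not touch hardware */
	pending = ctl->ops.get_pending_flush(ctl);
	pr_debug("ctl pending flush mask 0x%x\n", pending);

	ctl->ops.trigger_flush(ctl);
	ctl->ops.clear_pending_flush(ctl);
}
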
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 62f2ebb..82ca83f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,12 +9,47 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
+#include "drm/msm_drm_pp.h"
 #include "sde_hw_mdss.h"
 #include "sde_hwio.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_dspp.h"
 
+#define PCC_ENABLE BIT(0)
+#define PCC_OP_MODE_OFF 0
+#define PCC_CONST_COEFF_OFF 4
+#define PCC_R_COEFF_OFF 0x10
+#define PCC_G_COEFF_OFF 0x1C
+#define PCC_B_COEFF_OFF 0x28
+#define PCC_RG_COEFF_OFF 0x34
+#define PCC_RB_COEFF_OFF 0x40
+#define PCC_GB_COEFF_OFF 0x4C
+#define PCC_RGB_COEFF_OFF 0x58
+#define PCC_CONST_COEFF_MASK 0xFFFF
+#define PCC_COEFF_MASK 0x3FFFF
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+#define PA_SZ_VAL_MASK   BIT(31)
+#define PA_SZ_SAT_MASK   BIT(30)
+#define PA_SZ_HUE_MASK   BIT(29)
+#define PA_CONT_MASK     BIT(28)
+#define PA_VAL_MASK      BIT(27)
+#define PA_SAT_MASK      BIT(26)
+#define PA_HUE_MASK      BIT(25)
+#define PA_LUTV_MASK     BIT(19)
+#define PA_HIST_MASK     BIT(16)
+#define PA_MEM_SKY_MASK  BIT(7)
+#define PA_MEM_FOL_MASK  BIT(6)
+#define PA_MEM_SKIN_MASK BIT(5)
+#define PA_ENABLE        BIT(20)
+
+#define PA_ENABLE_MASK (PA_SZ_VAL_MASK | PA_SZ_SAT_MASK | PA_SZ_HUE_MASK \
+			| PA_CONT_MASK | PA_VAL_MASK | PA_SAT_MASK \
+			| PA_HUE_MASK | PA_LUTV_MASK | PA_HIST_MASK \
+			| PA_MEM_SKY_MASK | PA_MEM_FOL_MASK | PA_MEM_SKIN_MASK)
+
+#define PA_LUT_SWAP_OFF 0x234
+
 static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -51,8 +86,139 @@
 {
 }
 
+void sde_dspp_setup_hue(struct sde_hw_dspp *dspp, void *cfg)
+{
+}
+
+void sde_dspp_setup_vlut(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct drm_msm_pa_vlut *payload = NULL;
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	u32 op_mode, tmp;
+	int i = 0, j = 0;
+
+	if (!hw_cfg  || (hw_cfg->payload && hw_cfg->len !=
+			sizeof(struct drm_msm_pa_vlut))) {
+		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
+			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			  ((hw_cfg) ? hw_cfg->len : 0),
+			  sizeof(struct drm_msm_pa_vlut));
+		return;
+	}
+	op_mode = SDE_REG_READ(&ctx->hw, 0);
+	if (!hw_cfg->payload) {
+		DRM_DEBUG_DRIVER("Disable vlut feature\n");
+		/*
+		 * In the PA_VLUT disable case, remove PA_VLUT enable bit(19)
+		 * first, then check whether any other PA sub-features are
+		 * enabled or not. If none of the sub-features are enabled,
+		 * remove the PA global enable bit(20).
+		 */
+		op_mode &= ~((u32)PA_LUTV_MASK);
+		if (!(op_mode & PA_ENABLE_MASK))
+			op_mode &= ~((u32)PA_ENABLE);
+		SDE_REG_WRITE(&ctx->hw, 0, op_mode);
+		return;
+	}
+	payload = hw_cfg->payload;
+	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
+	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
+		tmp = (payload->val[i] & REG_MASK(10)) |
+			((payload->val[i + 1] & REG_MASK(10)) << 16);
+		SDE_REG_WRITE(&ctx->hw, (ctx->cap->sblk->vlut.base + j),
+			     tmp);
+	}
+	SDE_REG_WRITE(&ctx->hw, PA_LUT_SWAP_OFF, 1);
+	op_mode |= PA_ENABLE | PA_LUTV_MASK;
+	SDE_REG_WRITE(&ctx->hw, 0, op_mode);
+}
+
 void sde_dspp_setup_pcc(struct sde_hw_dspp *ctx, void *cfg)
 {
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	struct drm_msm_pcc *pcc;
+	void  __iomem *base;
+
+	if (!hw_cfg  || (hw_cfg->len != sizeof(*pcc)  && hw_cfg->payload)) {
+		DRM_ERROR(
+			"hw_cfg %pK payload %pK payload size %d exp size %zd\n",
+			hw_cfg, (hw_cfg ? hw_cfg->payload : NULL),
+			((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
+		return;
+	}
+	base = ctx->hw.base_off + ctx->cap->base;
+
+	/* Turn off feature */
+	if (!hw_cfg->payload) {
+		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base,
+			      PCC_OP_MODE_OFF);
+		return;
+	}
+	DRM_DEBUG_DRIVER("Enable PCC feature\n");
+	pcc = hw_cfg->payload;
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF,
+				  pcc->r.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 4,
+		      pcc->g.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 8,
+		      pcc->b.c & PCC_CONST_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF,
+				  pcc->r.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 4,
+				  pcc->g.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 8,
+				  pcc->b.r & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF,
+				  pcc->r.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 4,
+				  pcc->g.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 8,
+				  pcc->b.g & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF,
+				  pcc->r.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 4,
+				  pcc->g.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 8,
+				  pcc->b.b & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF,
+				  pcc->r.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 4,
+				  pcc->g.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 8,
+				  pcc->b.rg & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF,
+				  pcc->r.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 4,
+				  pcc->g.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 8,
+				  pcc->b.rb & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF,
+				  pcc->r.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 4,
+				  pcc->g.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 8,
+				  pcc->b.gb & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF,
+				  pcc->r.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 4,
+		      pcc->g.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+		      ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 8,
+		      pcc->b.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, PCC_ENABLE);
 }
 
 void sde_dspp_setup_sharpening(struct sde_hw_dspp *ctx, void *cfg)
@@ -75,10 +241,36 @@
 {
 }
 
-static void _setup_dspp_ops(struct sde_hw_dspp_ops *ops,
-		unsigned long features)
+static void _setup_dspp_ops(struct sde_hw_dspp *c, unsigned long features)
 {
+	int i = 0;
+
+	for (i = 0; i < SDE_DSPP_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		switch (i) {
+		case SDE_DSPP_PCC:
+			if (c->cap->sblk->pcc.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x0)))
+				c->ops.setup_pcc = sde_dspp_setup_pcc;
+			break;
+		case SDE_DSPP_HSIC:
+			if (c->cap->sblk->hsic.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x0)))
+				c->ops.setup_hue = sde_dspp_setup_hue;
+			break;
+		case SDE_DSPP_VLUT:
+			if (c->cap->sblk->vlut.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x0)))
+				c->ops.setup_vlut = sde_dspp_setup_vlut;
+			break;
+		default:
+			break;
+		}
+	}
 }
+
 struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
 			void __iomem *addr,
 			struct sde_mdss_cfg *m)
@@ -99,7 +291,7 @@
 	/* Assign ops */
 	c->idx = idx;
 	c->cap = cfg;
-	_setup_dspp_ops(&c->ops, c->cap->features);
+	_setup_dspp_ops(c, c->cap->features);
 
 	return c;
 }
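
The new color processing entry points take a struct sde_hw_cp_cfg wrapper: a non-NULL payload of the expected length enables the feature, while a NULL payload disables it (for PA_VLUT the global PA enable bit is also dropped once no other PA sub-feature remains enabled). A hedged caller sketch, assuming drm_msm_pcc from msm_drm_pp.h and the ops layout set up above:

/* Hypothetical caller: enable PCC, or pass pcc == NULL to disable it. */
static void example_apply_pcc(struct sde_hw_dspp *dspp,
		struct drm_msm_pcc *pcc)
{
	struct sde_hw_cp_cfg hw_cfg = {
		.payload = pcc,
		.len = pcc ? sizeof(*pcc) : 0,
	};

	if (dspp->ops.setup_pcc)
		dspp->ops.setup_pcc(dspp, &hw_cfg);
}
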
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 6ba161a..6ffc4b6 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -83,12 +83,48 @@
 	 * @cfg: Pointer to configuration
 	 */
 	void (*setup_danger_safe)(struct sde_hw_dspp *ctx, void *cfg);
+
 	/**
 	 * setup_dither - setup dspp dither
 	 * @ctx: Pointer to dspp context
 	 * @cfg: Pointer to configuration
 	 */
 	void (*setup_dither)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_hue - setup dspp PA hue
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_hue)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_sat - setup dspp PA saturation
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_sat)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_val - setup dspp PA value
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_val)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_cont - setup dspp PA contrast
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_cont)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_vlut - setup dspp PA VLUT
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_vlut)(struct sde_hw_dspp *ctx, void *cfg);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 452c8783..592c490 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
 
 /* These register are offset to mixer base + stage base */
 #define LM_BLEND0_OP                     0x00
+#define LM_BLEND0_CONST_ALPHA            0x04
 #define LM_BLEND0_FG_ALPHA               0x04
 #define LM_BLEND0_BG_ALPHA               0x08
 
@@ -71,16 +72,19 @@
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
 	u32 outsize;
-	u32 opmode;
+	u32 op_mode;
 
-	opmode = SDE_REG_READ(c, LM_OP_MODE);
+	op_mode = SDE_REG_READ(c, LM_OP_MODE);
 
 	outsize = mixer->out_height << 16 | mixer->out_width;
 	SDE_REG_WRITE(c, LM_OUT_SIZE, outsize);
 
 	/* SPLIT_LEFT_RIGHT */
-	opmode = (opmode & ~(1 << 31)) | ((mixer->right_mixer) ? (1 << 31) : 0);
-	SDE_REG_WRITE(c, LM_OP_MODE, opmode);
+	if (mixer->right_mixer)
+		op_mode |= BIT(31);
+	else
+		op_mode &= ~BIT(31);
+	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
 }
 
 static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
@@ -99,6 +103,25 @@
 	}
 }
 
+static void sde_hw_lm_setup_blend_config_msmskunk(struct sde_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+	u32 const_alpha;
+
+	if (stage == SDE_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
 static void sde_hw_lm_setup_blend_config(struct sde_hw_mixer *ctx,
 	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
 {
@@ -126,24 +149,28 @@
 	/* read the existing op_mode configuration */
 	op_mode = SDE_REG_READ(c, LM_OP_MODE);
 
-	op_mode |= mixer_op_mode;
+	op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
 
 	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
 }
 
-static void sde_hw_lm_gammacorrection(struct sde_hw_mixer *mixer,
+static void sde_hw_lm_gc(struct sde_hw_mixer *mixer,
 			void *cfg)
 {
 }
 
-static void _setup_mixer_ops(struct sde_hw_lm_ops *ops,
+static void _setup_mixer_ops(struct sde_mdss_cfg *m,
+		struct sde_hw_lm_ops *ops,
 		unsigned long cap)
 {
 	ops->setup_mixer_out = sde_hw_lm_setup_out;
-	ops->setup_blend_config = sde_hw_lm_setup_blend_config;
+	if (IS_MSMSKUNK_TARGET(m->hwversion))
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config_msmskunk;
+	else
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
 	ops->setup_alpha_out = sde_hw_lm_setup_color3;
 	ops->setup_border_color = sde_hw_lm_setup_border_color;
-	ops->setup_gammcorrection = sde_hw_lm_gammacorrection;
+	ops->setup_gc = sde_hw_lm_gc;
 };
 
 struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
@@ -166,7 +193,7 @@
 	/* Assign ops */
 	c->idx = idx;
 	c->cap = cfg;
-	_setup_mixer_ops(&c->ops, c->cap->features);
+	_setup_mixer_ops(m, &c->ops, c->cap->features);
 
 	/*
 	 * Perform any default initialization for the sspp blocks
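
For clarity, the msmskunk blend path above packs both alphas into the single LM_BLEND0_CONST_ALPHA register: background alpha in bits [7:0] and foreground alpha in bits [23:16], so fg_alpha 0xFF with bg_alpha 0x40 programs 0x00FF0040. A minimal sketch of that packing (example_lm_const_alpha is illustrative only):

/* Illustrative only: const-alpha packing used by the msmskunk blend path. */
static inline u32 example_lm_const_alpha(u32 fg_alpha, u32 bg_alpha)
{
	return (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
}
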
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
index 1bde86e..ca671f8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,8 +60,10 @@
 	void (*setup_border_color)(struct sde_hw_mixer *ctx,
 		struct sde_mdss_color *color,
 		u8 border_en);
-
-	void (*setup_gammcorrection)(struct sde_hw_mixer *mixer,
+	/**
+	 * setup_gc : enable/disable gamma correction feature
+	 */
+	void (*setup_gc)(struct sde_hw_mixer *mixer,
 			void *cfg);
 
 };
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index aaad568..d912f31 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -178,6 +178,7 @@
 	INTF_HDMI = 0x3,
 	INTF_LCDC = 0x5,
 	INTF_EDP = 0x9,
+	INTF_DP = 0xa,
 	INTF_TYPE_MAX,
 
 	/* virtual interfaces */
@@ -424,4 +425,14 @@
 #define SDE_DBG_MASK_TOP      (1 << 9)
 #define SDE_DBG_MASK_VBIF     (1 << 10)
 
+/**
+ * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
+ * @payload: Feature specific payload.
+ * @len: Length of the payload.
+ */
+struct sde_hw_cp_cfg {
+	void *payload;
+	u32 len;
+};
+
 #endif  /* _SDE_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index b06834a..a478a7c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -125,15 +125,15 @@
 	case SDE_SSPP_CSC:
 		*idx = sblk->csc_blk.base;
 		break;
-	case SDE_SSPP_PA_V1:
-		*idx = sblk->pa_blk.base;
-		break;
-	case SDE_SSPP_HIST_V1:
-		*idx = sblk->hist_lut.base;
+	case SDE_SSPP_HSIC:
+		*idx = sblk->hsic.base;
 		break;
 	case SDE_SSPP_PCC:
 		*idx = sblk->pcc_blk.base;
 		break;
+	case SDE_SSPP_MEMCOLOR:
+		*idx = sblk->memcolor.base;
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -148,8 +148,7 @@
 	u32 opmode;
 
 	if (!_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) &&
-			(test_bit(SDE_SSPP_CSC, &ctx->cap->features) ||
-			 test_bit(SDE_SSPP_PA_V1, &ctx->cap->features))) {
+			test_bit(SDE_SSPP_CSC, &ctx->cap->features)) {
 		opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
 
 		if (en)
@@ -526,9 +525,8 @@
 	if (test_bit(SDE_SSPP_CSC, &features))
 		ops->setup_csc = sde_hw_sspp_setup_csc;
 
-	if (test_bit(SDE_SSPP_PA_V1, &features)) {
+	if (test_bit(SDE_SSPP_SCALER_QSEED2, &features))
 		ops->setup_sharpening = sde_hw_sspp_setup_sharpening;
-	}
 }
 
 static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index 8feca9b..10d3917 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -14,17 +14,14 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_top.h"
 
-#define SSPP_SPARE                        0x24
-#define SPLIT_DISPLAY_ENABLE              0x2F4
+#define SSPP_SPARE                        0x28
 
-#define LOWER_PIPE_CTRL                   0x2F8
 #define FLD_SPLIT_DISPLAY_CMD             BIT(1)
 #define FLD_SMART_PANEL_FREE_RUN          BIT(2)
 #define FLD_INTF_1_SW_TRG_MUX             BIT(4)
 #define FLD_INTF_2_SW_TRG_MUX             BIT(8)
 #define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
 
-#define UPPER_PIPE_CTRL                   0x3F0
 #define TE_LINE_INTERVAL                  0x3F4
 
 #define TRAFFIC_SHAPER_EN                 BIT(31)
@@ -65,9 +62,9 @@
 	}
 
 	SDE_REG_WRITE(c, SSPP_SPARE, (cfg->split_flush_en) ? 0x1 : 0x0);
-	SDE_REG_WRITE(c, LOWER_PIPE_CTRL, lower_pipe);
-	SDE_REG_WRITE(c, UPPER_PIPE_CTRL, upper_pipe);
-	SDE_REG_WRITE(c, SPLIT_DISPLAY_ENABLE, cfg->en & 0x1);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
 }
 
 static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
@@ -84,30 +81,6 @@
 	SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
 }
 
-static void sde_hw_setup_traffic_shaper(struct sde_hw_mdp *mdp,
-		struct traffic_shaper_cfg *cfg)
-{
-	struct sde_hw_blk_reg_map *c = &mdp->hw;
-	u32 ts_control = 0;
-	u32 offset;
-	u64 bpc;
-
-	if (cfg->rd_client)
-		offset = TRAFFIC_SHAPER_RD_CLIENT(cfg->client_id);
-	else
-		offset = TRAFFIC_SHAPER_WR_CLIENT(cfg->client_id);
-
-	if (cfg->en) {
-		bpc = cfg->bpc_numer;
-		do_div(bpc, (cfg->bpc_denom >>
-					TRAFFIC_SHAPER_FIXPOINT_FACTOR));
-		ts_control = lower_32_bits(bpc) + 1;
-		ts_control |= TRAFFIC_SHAPER_EN;
-	}
-
-	SDE_REG_WRITE(c, offset, ts_control);
-}
-
 static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
 		enum sde_clk_ctrl_type clk_ctrl, bool enable)
 {
@@ -141,7 +114,6 @@
 {
 	ops->setup_split_pipe = sde_hw_setup_split_pipe_control;
 	ops->setup_cdm_output = sde_hw_setup_cdm_output;
-	ops->setup_traffic_shaper = sde_hw_setup_traffic_shaper;
 	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
 }
 
@@ -169,34 +141,31 @@
 		void __iomem *addr,
 		const struct sde_mdss_cfg *m)
 {
-	static struct sde_hw_mdp *c;
+	struct sde_hw_mdp *mdp;
 	const struct sde_mdp_cfg *cfg;
 
-	/* mdp top is singleton */
-	if (c)
-		return c;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
+	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+	if (!mdp)
 		return ERR_PTR(-ENOMEM);
 
-	cfg = _top_offset(idx, m, addr, &c->hw);
+	cfg = _top_offset(idx, m, addr, &mdp->hw);
 	if (IS_ERR_OR_NULL(cfg)) {
-		kfree(c);
+		kfree(mdp);
 		return ERR_PTR(-EINVAL);
 	}
 
 	/*
 	 * Assign ops
 	 */
-	c->idx = idx;
-	c->cap = cfg;
-	_setup_mdp_ops(&c->ops, c->cap->features);
+	mdp->idx = idx;
+	mdp->cap = cfg;
+	_setup_mdp_ops(&mdp->ops, mdp->cap->features);
 
 	/*
 	 * Perform any default initialization for the intf
 	 */
-	return c;
+
+	return mdp;
 }
 
 void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.c b/drivers/gpu/drm/msm/sde/sde_hw_util.c
index 51d8c2e..6f52f31 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.c
@@ -9,7 +9,9 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include "msm_drv.h"
+#include "sde_kms.h"
 #include "sde_hw_mdss.h"
 #include "sde_hw_util.h"
 
@@ -23,7 +25,8 @@
 {
 	/* don't need to mutex protect this */
 	if (c->log_mask & sde_hw_util_log_mask)
-		DBG("[%s:0x%X] <= 0x%X", name, c->blk_off + reg_off, val);
+		SDE_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+				name, c->blk_off + reg_off, val);
 	writel_relaxed(val, c->base_off + c->blk_off + reg_off);
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
index a776d55..c68ee23 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -43,6 +43,8 @@
 #define WB_CSC_BASE			0x260
 #define WB_DST_ADDR_SW_STATUS		0x2B0
 #define WB_CDP_CTRL			0x2B4
+#define WB_OUT_IMAGE_SIZE		0x2C0
+#define WB_OUT_XY			0x2C4
 
 static struct sde_wb_cfg *_wb_offset(enum sde_wb wb,
 		struct sde_mdss_cfg *m,
@@ -96,7 +98,8 @@
 
 	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
 		dst_format |= BIT(8); /* DSTC3_EN */
-		if (!fmt->alpha_enable)
+		if (!fmt->alpha_enable ||
+				!(ctx->caps->features & BIT(SDE_WB_PIPE_ALPHA)))
 			dst_format |= BIT(14); /* DST_ALPHA_X */
 	}
 
@@ -118,7 +121,11 @@
 			(data->dest.plane_pitch[1] << 16);
 	ystride1 = data->dest.plane_pitch[2] |
 			(data->dest.plane_pitch[3] << 16);
-	outsize = (data->dest.height << 16) | data->dest.width;
+
+	if (data->roi.h && data->roi.w)
+		outsize = (data->roi.h << 16) | data->roi.w;
+	else
+		outsize = (data->dest.height << 16) | data->dest.width;
 
 	if (SDE_FORMAT_IS_UBWC(fmt)) {
 		opmode |= BIT(0);
@@ -155,12 +162,18 @@
 	SDE_REG_WRITE(c, WB_CDP_CTRL, cdp_settings);
 }
 
-static void sde_hw_wb_traffic_shaper(struct sde_hw_wb *ctx,
-		struct sde_hw_wb_cfg *data)
+static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
 {
-	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_traffic_shaper)
-		ctx->hw_mdp->ops.setup_traffic_shaper(ctx->hw_mdp,
-				&data->ts_cfg);
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 image_size, out_size, out_xy;
+
+	image_size = (wb->dest.height << 16) | wb->dest.width;
+	out_xy = (wb->roi.y << 16) | wb->roi.x;
+	out_size = (wb->roi.h << 16) | wb->roi.w;
+
+	SDE_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
+	SDE_REG_WRITE(c, WB_OUT_XY, out_xy);
+	SDE_REG_WRITE(c, WB_OUT_SIZE, out_size);
 }
 
 static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
@@ -169,8 +182,8 @@
 	ops->setup_outaddress = sde_hw_wb_setup_outaddress;
 	ops->setup_outformat = sde_hw_wb_setup_format;
 
-	if (test_bit(SDE_WB_TRAFFIC_SHAPER, &features))
-		ops->setup_trafficshaper = sde_hw_wb_traffic_shaper;
+	if (test_bit(SDE_WB_XY_ROI_OFFSET, &features))
+		ops->setup_roi = sde_hw_wb_roi;
 }
 
 struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
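
A hedged caller sketch of the new writeback ROI support: setup_roi is only wired up when the catalog advertises SDE_WB_XY_ROI_OFFSET, and it programs the full destination size, the ROI origin, and the ROI size into WB_OUT_IMAGE_SIZE, WB_OUT_XY, and WB_OUT_SIZE. The helper below is hypothetical and assumes the wb context exposes its ops table as wb->ops, like the other hw blocks in this patch.

/* Hypothetical caller: crop a 1920x1080 destination to 1280x720 at (320,180). */
static void example_wb_crop(struct sde_hw_wb *wb, struct sde_hw_wb_cfg *cfg)
{
	cfg->roi = (struct sde_rect) {
		.x = 320, .y = 180, .w = 1280, .h = 720,
	};

	if (wb->ops.setup_roi)
		wb->ops.setup_roi(wb, cfg);
}
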
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
index e01750b..52a5ee5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -24,6 +24,7 @@
 	struct sde_hw_fmt_layout dest;
 	enum sde_intf_mode intf_mode;
 	struct traffic_shaper_cfg ts_cfg;
+	struct sde_rect roi;
 	bool is_secure;
 };
 
@@ -53,6 +54,9 @@
 
 	void (*setup_trafficshaper)(struct sde_hw_wb *ctx,
 		struct sde_hw_wb_cfg *wb);
+
+	void (*setup_roi)(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *wb);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 2604000..650ef84 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,8 +32,6 @@
 		"mdp_0",
 };
 
-#define DEFAULT_MDP_SRC_CLK 300000000
-
 /**
  * Controls size of event log buffer. Specified as a power of 2.
  */
@@ -51,17 +49,76 @@
 #define SDE_DEBUGFS_DIR "msm_sde"
 #define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
 
+/**
+ * sdecustom - enable certain driver customizations for sde clients
+ *	Enabling this modifies the standard DRM behavior slightly and assumes
+ *	that the clients have specific knowledge about the modifications that
+ *	are involved, so don't enable this unless you know what you're doing.
+ *
+ *	Parts of the driver that are affected by this setting may be located by
+ *	searching for invocations of the 'sde_is_custom_client()' function.
+ *
+ *	This is enabled by default.
+ */
+static bool sdecustom = true;
+module_param(sdecustom, bool, 0400);
+MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
+
+bool sde_is_custom_client(void)
+{
+	return sdecustom;
+}
+
 static int sde_debugfs_show_regset32(struct seq_file *s, void *data)
 {
-	struct sde_debugfs_regset32 *regset = s->private;
+	struct sde_debugfs_regset32 *regset;
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
 	void __iomem *base;
-	int i;
+	uint32_t i, addr;
 
-	base = regset->base + regset->offset;
+	if (!s || !s->private)
+		return 0;
 
-	for (i = 0; i < regset->blk_len; i += 4)
-		seq_printf(s, "[%x] 0x%08x\n",
-				regset->offset + i, readl_relaxed(base + i));
+	regset = s->private;
+
+	sde_kms = regset->sde_kms;
+	if (!sde_kms || !sde_kms->mmio)
+		return 0;
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return 0;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return 0;
+
+	base = sde_kms->mmio + regset->offset;
+
+	/* insert padding spaces, if needed */
+	if (regset->offset & 0xF) {
+		seq_printf(s, "[%x]", regset->offset & ~0xF);
+		for (i = 0; i < (regset->offset & 0xF); i += 4)
+			seq_puts(s, "         ");
+	}
+
+	if (sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true)) {
+		seq_puts(s, "failed to enable sde clocks\n");
+		return 0;
+	}
+
+	/* main register output */
+	for (i = 0; i < regset->blk_len; i += 4) {
+		addr = regset->offset + i;
+		if ((addr & 0xF) == 0x0)
+			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+		seq_printf(s, " %08x", readl_relaxed(base + i));
+	}
+	seq_puts(s, "\n");
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
 	return 0;
 }
@@ -79,19 +136,19 @@
 };
 
 void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
-		uint32_t offset, uint32_t length, void __iomem *base)
+		uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
 {
 	if (regset) {
 		regset->offset = offset;
 		regset->blk_len = length;
-		regset->base = base;
+		regset->sde_kms = sde_kms;
 	}
 }
 
 void *sde_debugfs_create_regset32(const char *name, umode_t mode,
 		void *parent, struct sde_debugfs_regset32 *regset)
 {
-	if (!name || !regset || !regset->base || !regset->blk_len)
+	if (!name || !regset || !regset->sde_kms || !regset->blk_len)
 		return NULL;
 
 	/* make sure offset is a multiple of 4 */
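
A short usage sketch of the reworked regset32 helpers: the register base is now resolved from sde_kms->mmio at read time, with the core clocks enabled around the dump. The wrapper name example_add_regdump and the "example_regs" node name are hypothetical.

/* Hypothetical debugfs registration with the sde_kms-based regset32. */
static void example_add_regdump(struct sde_kms *sde_kms, void *debugfs_root,
		struct sde_debugfs_regset32 *regset)
{
	/* dump 0x100 bytes starting at offset 0x1000 from the mdp base */
	sde_debugfs_setup_regset32(regset, 0x1000, 0x100, sde_kms);
	sde_debugfs_create_regset32("example_regs", 0400, debugfs_root,
			regset);
}
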
@@ -137,12 +194,24 @@
 
 static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
 	return sde_crtc_vblank(crtc, true);
 }
 
 static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
 	sde_crtc_vblank(crtc, false);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 }
 
 static void sde_prepare_commit(struct msm_kms *kms,
@@ -197,8 +266,23 @@
 	struct drm_device *dev = crtc->dev;
 	int ret;
 
+	if (!kms || !crtc || !crtc->state) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
 	 /* ref count the vblank event and interrupts while we wait for it */
-	if (drm_crtc_vblank_get(crtc))
+	if (sde_crtc_vblank(crtc, true))
 		return;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -218,7 +302,7 @@
 	}
 
 	 /* release vblank event ref count */
-	drm_crtc_vblank_put(crtc);
+	sde_crtc_vblank(crtc, false);
 }
 
 static void sde_kms_prepare_fence(struct msm_kms *kms,
@@ -238,27 +322,44 @@
 
 static int modeset_init(struct sde_kms *sde_kms)
 {
-	struct drm_device *dev = sde_kms->dev;
+	struct drm_device *dev;
 	struct drm_plane *primary_planes[MAX_PLANES], *plane;
 	struct drm_crtc *crtc;
 
-	struct msm_drm_private *priv = sde_kms->dev->dev_private;
-	struct sde_mdss_cfg *catalog = sde_kms->catalog;
+	struct msm_drm_private *priv;
+	struct sde_mdss_cfg *catalog;
 
-	int primary_planes_idx = 0, i, ret, max_crtc_count;
-	int max_private_planes = catalog->mixer_count;
+	int primary_planes_idx, i, ret;
+	int max_crtc_count, max_plane_count;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid sde_kms\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev;
+	priv = dev->dev_private;
+	catalog = sde_kms->catalog;
+
+	/* Enumerate displays supported */
+	sde_encoders_init(dev);
+
+	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+	max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
 
 	/* Create the planes */
-	for (i = 0; i < catalog->sspp_count; i++) {
+	primary_planes_idx = 0;
+	for (i = 0; i < max_plane_count; i++) {
 		bool primary = true;
 
 		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
-			|| primary_planes_idx > max_private_planes)
+			|| primary_planes_idx >= max_crtc_count)
 			primary = false;
 
-		plane = sde_plane_init(dev, catalog->sspp[i].id, primary);
+		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
+				(1UL << max_crtc_count) - 1);
 		if (IS_ERR(plane)) {
-			DRM_ERROR("sde_plane_init failed\n");
+			SDE_ERROR("sde_plane_init failed\n");
 			ret = PTR_ERR(plane);
 			goto fail;
 		}
@@ -268,15 +369,11 @@
 			primary_planes[primary_planes_idx++] = plane;
 	}
 
-	/* Enumerate displays supported */
-	sde_encoders_init(dev);
-
-	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
 	max_crtc_count = min(max_crtc_count, primary_planes_idx);
 
 	/* Create one CRTC per encoder */
 	for (i = 0; i < max_crtc_count; i++) {
-		crtc = sde_crtc_init(dev, primary_planes[i], i);
+		crtc = sde_crtc_init(dev, primary_planes[i]);
 		if (IS_ERR(crtc)) {
 			ret = PTR_ERR(crtc);
 			goto fail;
@@ -284,6 +381,13 @@
 		priv->crtcs[priv->num_crtcs++] = crtc;
 	}
 
+	if (sde_is_custom_client()) {
+		/* All CRTCs are compatible with all planes */
+		for (i = 0; i < priv->num_planes; i++)
+			priv->planes[i]->possible_crtcs =
+				(1 << priv->num_crtcs) - 1;
+	}
+
 	/* All CRTCs are compatible with all encoders */
 	for (i = 0; i < priv->num_encoders; i++)
 		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
@@ -297,40 +401,33 @@
 {
 	return 0;
 }
+
+static int sde_kms_postinit(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid sde_kms\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev;
+
+	/*
+	 * Allow vblank interrupt to be disabled by drm vblank timer.
+	 */
+	dev->vblank_disable_allowed = true;
+
+	return 0;
+}
+
 static long sde_round_pixclk(struct msm_kms *kms, unsigned long rate,
 		struct drm_encoder *encoder)
 {
 	return rate;
 }
 
-static void sde_postopen(struct msm_kms *kms, struct drm_file *file)
-{
-	struct sde_kms *sde_kms;
-	struct msm_drm_private *priv;
-
-	if (!kms)
-		return;
-
-	sde_kms = to_sde_kms(kms);
-	priv = sde_kms->dev->dev_private;
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
-}
-
-static void sde_preclose(struct msm_kms *kms, struct drm_file *file)
-{
-	struct sde_kms *sde_kms;
-	struct msm_drm_private *priv;
-
-	if (!kms)
-		return;
-
-	sde_kms = to_sde_kms(kms);
-	priv = sde_kms->dev->dev_private;
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-}
-
 static void sde_destroy(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
@@ -349,12 +446,25 @@
 	kfree(sde_kms);
 }
 
+static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
 static const struct msm_kms_funcs kms_funcs = {
 	.hw_init         = sde_hw_init,
+	.postinit        = sde_kms_postinit,
 	.irq_preinstall  = sde_irq_preinstall,
 	.irq_postinstall = sde_irq_postinstall,
 	.irq_uninstall   = sde_irq_uninstall,
 	.irq             = sde_irq,
+	.preclose        = sde_kms_preclose,
 	.prepare_fence   = sde_kms_prepare_fence,
 	.prepare_commit  = sde_prepare_commit,
 	.commit          = sde_commit,
@@ -365,8 +475,6 @@
 	.check_modified_format = sde_format_check_modified_format,
 	.get_format      = sde_get_msm_format,
 	.round_pixclk    = sde_round_pixclk,
-	.postopen        = sde_postopen,
-	.preclose        = sde_preclose,
 	.destroy         = sde_destroy,
 };
 
@@ -376,198 +484,6 @@
 	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
 }
 
-/**
- * _sde_vbif_wait_for_xin_halt - wait for the xin to halt
- * @vbif:	Pointer to hardware vbif driver
- * @xin_id:	Client interface identifier
- * @return:	0 if success; error code otherwise
- */
-static int _sde_vbif_wait_for_xin_halt(struct sde_hw_vbif *vbif, u32 xin_id)
-{
-	ktime_t timeout;
-	bool status;
-	int rc;
-
-	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
-		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
-		return -EINVAL;
-	}
-
-	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
-	for (;;) {
-		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
-		if (status)
-			break;
-		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
-			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
-			break;
-		}
-		usleep_range(501, 1000);
-	}
-
-	if (!status) {
-		rc = -ETIMEDOUT;
-		SDE_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
-				vbif->idx - VBIF_0, xin_id);
-	} else {
-		rc = 0;
-		SDE_DEBUG("VBIF %d client %d is halted\n",
-				vbif->idx - VBIF_0, xin_id);
-	}
-
-	return rc;
-}
-
-/**
- * _sde_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
- * @vbif:	Pointer to hardware vbif driver
- * @ot_lim:	Pointer to OT limit to be modified
- * @params:	Pointer to usecase parameters
- */
-static void _sde_vbif_apply_dynamic_ot_limit(struct sde_hw_vbif *vbif,
-		u32 *ot_lim, struct sde_vbif_set_ot_params *params)
-{
-	u64 pps;
-	const struct sde_vbif_dynamic_ot_tbl *tbl;
-	u32 i;
-
-	if (!vbif || !(vbif->cap->features & BIT(SDE_VBIF_QOS_OTLIM)))
-		return;
-
-	/* Dynamic OT setting done only for WFD */
-	if (!params->is_wfd)
-		return;
-
-	pps = params->frame_rate;
-	pps *= params->width;
-	pps *= params->height;
-
-	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
-			&vbif->cap->dynamic_ot_wr_tbl;
-
-	for (i = 0; i < tbl->count; i++) {
-		if (pps <= tbl->cfg[i].pps) {
-			*ot_lim = tbl->cfg[i].ot_limit;
-			break;
-		}
-	}
-
-	SDE_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
-			vbif->idx - VBIF_0, params->xin_id,
-			params->width, params->height, params->frame_rate,
-			pps, *ot_lim);
-}
-
-/**
- * _sde_vbif_get_ot_limit - get OT based on usecase & configuration parameters
- * @vbif:	Pointer to hardware vbif driver
- * @params:	Pointer to usecase parameters
- * @return:	OT limit
- */
-static u32 _sde_vbif_get_ot_limit(struct sde_hw_vbif *vbif,
-	struct sde_vbif_set_ot_params *params)
-{
-	u32 ot_lim = 0;
-	u32 val;
-
-	if (!vbif || !vbif->cap) {
-		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
-		return -EINVAL;
-	}
-
-	if (vbif->cap->default_ot_wr_limit && !params->rd)
-		ot_lim = vbif->cap->default_ot_wr_limit;
-	else if (vbif->cap->default_ot_rd_limit && params->rd)
-		ot_lim = vbif->cap->default_ot_rd_limit;
-
-	/*
-	 * If default ot is not set from dt/catalog,
-	 * then do not configure it.
-	 */
-	if (ot_lim == 0)
-		goto exit;
-
-	/* Modify the limits if the target and the use case requires it */
-	_sde_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
-
-	if (vbif && vbif->ops.get_limit_conf) {
-		val = vbif->ops.get_limit_conf(vbif,
-				params->xin_id, params->rd);
-		if (val == ot_lim)
-			ot_lim = 0;
-	}
-
-exit:
-	SDE_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
-			vbif->idx - VBIF_0, params->xin_id, ot_lim);
-	return ot_lim;
-}
-
-/**
- * sde_vbif_set_ot_limit - set OT based on usecase & configuration parameters
- * @vbif:	Pointer to hardware vbif driver
- * @params:	Pointer to usecase parameters
- *
- * Note this function would block waiting for bus halt.
- */
-void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
-		struct sde_vbif_set_ot_params *params)
-{
-	struct sde_hw_vbif *vbif = NULL;
-	struct sde_hw_mdp *mdp;
-	bool forced_on = false;
-	u32 ot_lim;
-	int ret, i;
-
-	if (!sde_kms) {
-		SDE_ERROR("invalid arguments\n");
-		return;
-	}
-	mdp = sde_kms->hw_mdp;
-
-	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
-		if (sde_kms->hw_vbif[i] &&
-				sde_kms->hw_vbif[i]->idx == params->vbif_idx)
-			vbif = sde_kms->hw_vbif[i];
-	}
-
-	if (!vbif || !mdp) {
-		SDE_ERROR("invalid arguments vbif %d mdp %d\n",
-				vbif != 0, mdp != 0);
-		return;
-	}
-
-	if (!mdp->ops.setup_clk_force_ctrl ||
-			!vbif->ops.set_limit_conf ||
-			!vbif->ops.set_halt_ctrl)
-		return;
-
-	ot_lim = _sde_vbif_get_ot_limit(vbif, params) & 0xFF;
-
-	if (ot_lim == 0)
-		goto exit;
-
-	trace_sde_perf_set_ot(params->num, params->xin_id, ot_lim,
-		params->vbif_idx);
-
-	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
-
-	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
-
-	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
-
-	ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
-	if (ret)
-		MSM_EVT(sde_kms->dev, vbif->idx, params->xin_id);
-
-	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
-
-	if (forced_on)
-		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-exit:
-	return;
-}
-
 int sde_mmu_init(struct sde_kms *sde_kms)
 {
 	struct msm_mmu *mmu;
@@ -699,13 +615,6 @@
 		goto kms_destroy;
 	}
 
-	rc = sde_power_clk_set_rate(&priv->phandle, "core_clk",
-		DEFAULT_MDP_SRC_CLK);
-	if (rc) {
-		SDE_ERROR("core clock set rate failed\n");
-		goto clk_rate_err;
-	}
-
 	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
 		true);
 	if (rc) {
@@ -715,9 +624,9 @@
 
 	core_hw_rev_init(sde_kms);
 
-	sde_kms->catalog = sde_hw_catalog_init(GET_MAJOR_REV(sde_kms->core_rev),
-			GET_MINOR_REV(sde_kms->core_rev),
-			GET_STEP_REV(sde_kms->core_rev));
+	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
+
+	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
 	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
 		SDE_ERROR("catalog init failed\n");
 		rc = PTR_ERR(sde_kms->catalog);
@@ -748,7 +657,9 @@
 		goto catalog_err;
 	}
 
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+	if (IS_ERR_OR_NULL(sde_kms->hw_intr))
+		goto catalog_err;
 
 	/*
 	 * Now we need to read the HW catalog and initialize resources such as
@@ -778,8 +689,7 @@
 	 * max crtc width is equal to the max mixer width * 2 and max height
 	 * is 4K
 	 */
-	dev->mode_config.max_width =
-			sde_kms->catalog->mixer[0].sblk->maxwidth * 2;
+	dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
 	dev->mode_config.max_height = 4096;
 
 	/*
@@ -787,9 +697,7 @@
 	 */
 	dev->mode_config.allow_fb_modifiers = true;
 
-	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
-	if (IS_ERR_OR_NULL(sde_kms->hw_intr))
-		goto clk_rate_err;
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
 	return &sde_kms->base;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index d6bfd85..d1ec5c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
 #include "sde_hw_wb.h"
 #include "sde_hw_top.h"
 #include "sde_connector.h"
+#include "sde_crtc.h"
 #include "sde_rm.h"
 #include "sde_power_handle.h"
 #include "sde_irq.h"
@@ -51,7 +52,7 @@
 			pr_debug(fmt, ##__VA_ARGS__);                      \
 	} while (0)
 
-#define SDE_ERROR(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
+#define SDE_ERROR(fmt, ...) pr_err("[sde error]" fmt, ##__VA_ARGS__)
 
 #define POPULATE_RECT(rect, a, b, c, d, Q16_flag) \
 	do {						\
@@ -75,6 +76,8 @@
 #define ktime_compare_safe(A, B) \
 	ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
 
+#define SDE_NAME_SIZE  12
+
 /*
  * struct sde_irq_callback - IRQ callback handlers
  * @func: intr handler
@@ -149,18 +152,6 @@
 
 #define to_sde_kms(x) container_of(x, struct sde_kms, base)
 
-struct sde_vbif_set_ot_params {
-	u32 xin_id;
-	u32 num;
-	u32 width;
-	u32 height;
-	u32 frame_rate;
-	bool rd;
-	bool is_wfd;
-	u32 vbif_idx;
-	u32 clk_ctrl;
-};
-
 struct sde_plane_state {
 	struct drm_plane_state base;
 
@@ -176,11 +167,10 @@
 	/* assigned by crtc blender */
 	enum sde_stage stage;
 
-	/* some additional transactional status to help us know in the
-	 * apply path whether we need to update SMP allocation, and
-	 * whether current update is still pending:
-	 */
-	bool mode_changed : 1;
+	/* bitmask for which pipe h/w config functions need to be updated */
+	uint32_t dirty;
+
+	/* whether the current update is still pending */
 	bool pending : 1;
 };
 
@@ -199,6 +189,13 @@
 	((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
 
 /**
+ * sde_is_custom_client - whether or not to enable non-standard customizations
+ *
+ * Return: Whether or not the 'sdeclient' module parameter was set on boot up
+ */
+bool sde_is_custom_client(void);
+
+/**
  * Debugfs functions - extra helper functions for debugfs support
  *
  * Main debugfs documentation is located at,
@@ -217,7 +214,7 @@
 struct sde_debugfs_regset32 {
 	uint32_t offset;
 	uint32_t blk_len;
-	void __iomem *base;
+	struct sde_kms *sde_kms;
 };
 
 /**
@@ -227,10 +224,10 @@
  * @regset: opaque register definition structure
  * @offset: sub-block offset
  * @length: sub-block length, in bytes
- * @base: base IOMEM address
+ * @sde_kms: pointer to sde kms structure
  */
 void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
-		uint32_t offset, uint32_t length, void __iomem *base);
+		uint32_t offset, uint32_t length, struct sde_kms *sde_kms);
 
 /**
  * sde_debugfs_create_regset32 - Create register read back file for debugfs
@@ -400,7 +397,8 @@
 enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
 void sde_plane_flush(struct drm_plane *plane);
 struct drm_plane *sde_plane_init(struct drm_device *dev,
-		uint32_t pipe, bool primary_plane);
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs);
 
 /**
  * sde_plane_wait_input_fence - wait for input fence object
@@ -422,35 +420,6 @@
 		uint32_t color, uint32_t alpha);
 
 /**
- * CRTC functions
- */
-int sde_crtc_vblank(struct drm_crtc *crtc, bool en);
-void sde_crtc_commit_kickoff(struct drm_crtc *crtc);
-
-/**
- * sde_crtc_prepare_fence - callback to prepare for output fences
- * @crtc: Pointer to drm crtc object
- */
-void sde_crtc_prepare_fence(struct drm_crtc *crtc);
-
-/**
- * sde_crtc_init - create a new crtc object
- * @dev: sde device
- * @plane: base plane
- * @vblank_id: Id for reporting vblank. Id in range from 0..dev->num_crtcs.
- * @Return: new crtc object or error
- */
-struct drm_crtc *sde_crtc_init(struct drm_device *dev,
-		struct drm_plane *plane,
-		int vblank_id);
-
-/**
- * sde_crtc_complete_commit - callback signalling completion of current commit
- * @crtc: Pointer to drm crtc object
- */
-void sde_crtc_complete_commit(struct drm_crtc *crtc);
-
-/**
  * sde_encoder_get_hw_resources - Populate table of required hardware resources
  * @encoder:	encoder pointer
  * @hw_res:	resource table to populate with encoder required resources
@@ -461,14 +430,6 @@
 		struct drm_connector_state *conn_state);
 
 /**
- * sde_encoder_needs_ctl_start - Get whether encoder type requires ctl_start
- *	CMD and WB encoders need ctl_start, video encs do not.
- * @encoder:	encoder pointer
- * @Return: true if the encoder type requires ctl_start issued
- */
-bool sde_encoder_needs_ctl_start(struct drm_encoder *encoder);
-
-/**
  * sde_encoder_register_vblank_callback - provide callback to encoder that
  *	will be called on the next vblank.
  * @encoder:	encoder pointer
@@ -486,13 +447,9 @@
  *	Delayed: Save the callback, and return. Does not block. Callback will
  *	be triggered later. E.g. cmd encoder will trigger at pp_done irq
  *	if it is outstanding.
- *	Callback registered is expected to flush _all_ ctl paths of the crtc
  * @encoder:	encoder pointer
- * @cb:		callback pointer, provide NULL to deregister
- * @data:	user data provided to callback
  */
-void sde_encoder_schedule_kickoff(struct drm_encoder *encoder,
-		void (*cb)(void *), void *data);
+void sde_encoder_schedule_kickoff(struct drm_encoder *encoder);
 
 /**
  * sde_encoder_wait_nxt_committed - Wait for hardware to have flushed the
@@ -512,12 +469,4 @@
  */
 void sde_encoders_init(struct drm_device *dev);
 
-/**
- * sde_vbif_set_ot_limit - set OT limit for vbif client
- * @sde_kms:	SDE handler
- * @params:	Pointer to OT configuration parameters
- */
-void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
-		struct sde_vbif_set_ot_params *params);
-
 #endif /* __sde_kms_H__ */
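
Note: the sde_kms.h hunk above replaces sde_plane_state's single mode_changed flag with a dirty bitmask plus a pending bit; the matching SDE_PLANE_DIRTY_* values are added in sde_plane.c below. As a rough sketch of how such a scheme typically flows through atomic check and update (the EXAMPLE_* names and helpers are illustrative only, not taken from this patch):

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_DIRTY_RECTS	0x1
#define EXAMPLE_DIRTY_FORMAT	0x2

struct example_plane_state {
	uint32_t dirty;		/* h/w blocks that still need programming */
	bool pending;		/* programmed but not yet flushed to h/w */
};

/* atomic check: accumulate bits for whatever actually changed */
static void example_mark_dirty(struct example_plane_state *pstate,
		bool rects_changed, bool format_changed)
{
	if (rects_changed)
		pstate->dirty |= EXAMPLE_DIRTY_RECTS;
	if (format_changed)
		pstate->dirty |= EXAMPLE_DIRTY_FORMAT;
}

/* atomic update: program only the dirty blocks, then clear the mask */
static void example_mode_set(struct example_plane_state *pstate)
{
	if (!pstate->dirty)
		return;			/* early out if nothing changed */
	pstate->pending = true;

	if (pstate->dirty & EXAMPLE_DIRTY_RECTS) {
		/* reprogram source/destination rectangles and scaler */
	}
	if (pstate->dirty & EXAMPLE_DIRTY_FORMAT) {
		/* reprogram pixel format and CSC */
	}

	pstate->dirty = 0x0;
}
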
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index a15985e..5257b8d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,13 @@
 #include "sde_hw_sspp.h"
 #include "sde_trace.h"
 #include "sde_crtc.h"
+#include "sde_vbif.h"
+
+#define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_PLANE(pl, fmt, ...) SDE_ERROR("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
 
 #define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
 #define PHASE_STEP_SHIFT	21
@@ -38,6 +45,12 @@
 
 #define SDE_PLANE_COLOR_FILL_FLAG	BIT(31)
 
+/* dirty bits for update function */
+#define SDE_PLANE_DIRTY_RECTS	0x1
+#define SDE_PLANE_DIRTY_FORMAT	0x2
+#define SDE_PLANE_DIRTY_SHARPEN	0x4
+#define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
+
 /**
  * enum sde_plane_qos - Different qos configurations for each pipe
  *
@@ -52,6 +65,12 @@
 	SDE_PLANE_QOS_PANIC_CTRL = BIT(2),
 };
 
+/*
+ * struct sde_plane - local sde plane structure
+ * @csc_cfg: Decoded user configuration for csc
+ * @csc_usr_ptr: Points to csc_cfg if valid user config available
+ * @csc_ptr: Points to sde_csc_cfg structure to use for current
+ */
 struct sde_plane {
 	struct drm_plane base;
 
@@ -66,7 +85,6 @@
 
 	struct sde_hw_pipe *pipe_hw;
 	struct sde_hw_pipe_cfg pipe_cfg;
-	struct sde_hw_pixel_ext pixel_ext;
 	struct sde_hw_sharp_cfg sharp_cfg;
 	struct sde_hw_scaler3_cfg scaler3_cfg;
 	struct sde_hw_pipe_qos_cfg pipe_qos_cfg;
@@ -74,7 +92,11 @@
 	bool is_error;
 	bool is_rt_pipe;
 
+	struct sde_hw_pixel_ext pixel_ext;
+	bool pixel_ext_usr;
+
 	struct sde_csc_cfg csc_cfg;
+	struct sde_csc_cfg *csc_usr_ptr;
 	struct sde_csc_cfg *csc_ptr;
 
 	const struct sde_sspp_sub_blks *pipe_sblk;
@@ -210,6 +232,7 @@
 	bool is_rt = false;
 
 	/* check if this plane has a physical connector interface */
+	mutex_lock(&plane->dev->mode_config.mutex);
 	drm_for_each_connector(connector, plane->dev)
 		if (connector->state &&
 				(connector->state->crtc == crtc) &&
@@ -218,6 +241,7 @@
 			is_rt = true;
 			break;
 		}
+	mutex_unlock(&plane->dev->mode_config.mutex);
 
 	SDE_DEBUG("plane%u: pnum:%d rt:%d\n",
 			plane->base.id, psde->pipe - SSPP_VIG0, is_rt);
@@ -455,11 +479,14 @@
 }
 
 /* helper to update a state's input fence pointer from the property */
-static void _sde_plane_set_input_fence(struct drm_plane *plane,
+static void _sde_plane_set_input_fence(struct sde_plane *psde,
 		struct sde_plane_state *pstate, uint64_t fd)
 {
-	if (!plane || !pstate)
+	if (!psde || !pstate) {
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				psde != 0, pstate != 0);
 		return;
+	}
 
 	/* clear previous reference */
 	if (pstate->input_fence)
@@ -468,39 +495,47 @@
 	/* get fence pointer for later */
 	pstate->input_fence = sde_sync_get(fd);
 
-	SDE_DEBUG("0x%llX\n", fd);
+	SDE_DEBUG_PLANE(psde, "0x%llX\n", fd);
 }
 
 int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
 {
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate;
+	uint32_t prefix;
 	void *input_fence;
 	int ret = -EINVAL;
 
 	if (!plane) {
 		SDE_ERROR("invalid plane\n");
 	} else if (!plane->state) {
-		SDE_ERROR("invalid plane state\n");
+		SDE_ERROR_PLANE(to_sde_plane(plane), "invalid state\n");
 	} else {
 		psde = to_sde_plane(plane);
 		pstate = to_sde_plane_state(plane->state);
 		input_fence = pstate->input_fence;
 
 		if (input_fence) {
+			prefix = sde_sync_get_name_prefix(input_fence);
 			ret = sde_sync_wait(input_fence, wait_ms);
+
+			MSM_EVT(plane->dev,
+				plane->base.id,
+				(uint64_t)-ret << (sizeof(uint32_t) * CHAR_BIT)
+				| prefix);
+
 			switch (ret) {
 			case 0:
-				SDE_DEBUG("%s signaled\n", psde->pipe_name);
+				SDE_DEBUG_PLANE(psde, "signaled\n");
 				break;
 			case -ETIME:
-				SDE_ERROR("timeout on %s, %ums\n",
-						psde->pipe_name, wait_ms);
+				SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
+						wait_ms, prefix);
 				psde->is_error = true;
 				break;
 			default:
-				SDE_ERROR("error on %s, %d\n",
-						psde->pipe_name, ret);
+				SDE_ERROR_PLANE(psde, "error %d on %08X\n",
+						ret, prefix);
 				psde->is_error = true;
 				break;
 			}
@@ -511,31 +546,33 @@
 	return ret;
 }
 
-static void _sde_plane_set_scanout(struct drm_plane *plane,
+static inline void _sde_plane_set_scanout(struct drm_plane *plane,
 		struct sde_plane_state *pstate,
 		struct sde_hw_pipe_cfg *pipe_cfg,
 		struct drm_framebuffer *fb)
 {
 	struct sde_plane *psde;
-	int ret, i;
+	int ret;
 
-	if (!plane || !pstate || !pipe_cfg || !fb)
-		return;
-
-	psde = to_sde_plane(plane);
-
-	ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
-	if (ret) {
-		SDE_ERROR("failed to get format layout, error: %d\n", ret);
+	if (!plane || !pstate || !pipe_cfg || !fb) {
+		SDE_ERROR(
+			"invalid arg(s), plane %d state %d cfg %d fb %d\n",
+			plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
 		return;
 	}
 
-	if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
-			BIT(SDE_DRM_DEINTERLACE))
-		for (i = 0; i < SDE_MAX_PLANES; ++i)
-			pipe_cfg->layout.plane_pitch[i] <<= 1;
+	psde = to_sde_plane(plane);
+	if (!psde->pipe_hw) {
+		SDE_ERROR_PLANE(psde, "invalid pipe_hw\n");
+		return;
+	}
 
-	if (psde->pipe_hw && psde->pipe_hw->ops.setup_sourceaddress)
+	ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+	if (ret == -EAGAIN)
+		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
+	else if (ret)
+		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
+	else if (psde->pipe_hw->ops.setup_sourceaddress)
 		psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg);
 }
 
@@ -548,7 +585,7 @@
 }
 
 /**
- * _sde_plane_setup_scaler2(): Determine default scaler phase steps/filter type
+ * _sde_plane_setup_scaler2 - determine default scaler phase steps/filter type
  * @psde: Pointer to SDE plane object
  * @src: Source size
  * @dst: Destination size
@@ -565,7 +602,9 @@
 		uint32_t chroma_subsampling)
 {
 	if (!psde || !phase_steps || !filter || !fmt) {
-		SDE_ERROR("invalid arguments\n");
+		SDE_ERROR(
+			"invalid arg(s), plane %d phase %d filter %d fmt %d\n",
+			psde != 0, phase_steps != 0, filter != 0, fmt != 0);
 		return -EINVAL;
 	}
 
@@ -593,7 +632,6 @@
 		}
 	} else {
 		/* disable scaler */
-		SDE_DEBUG("disable scaler\n");
 		filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_MAX;
 		filter[SDE_SSPP_COMP_1_2] = SDE_SCALE_FILTER_MAX;
 		filter[SDE_SSPP_COMP_3] = SDE_SCALE_FILTER_MAX;
@@ -694,50 +732,7 @@
 	}
 }
 
-/**
- * _sde_plane_verify_blob - verify incoming blob is big enough to contain
- *                          sub-structure
- * @blob_ptr: Pointer to start of incoming blob data
- * @blob_size: Size of incoming blob data, in bytes
- * @sub_ptr: Pointer to start of desired sub-structure
- * @sub_size: Required size of sub-structure, in bytes
- */
-static int _sde_plane_verify_blob(void *blob_ptr,
-		size_t blob_size,
-		void *sub_ptr,
-		size_t sub_size)
-{
-	/*
-	 * Use the blob size provided by drm to check if there are enough
-	 * bytes from the start of versioned sub-structures to the end of
-	 * blob data:
-	 *
-	 * e.g.,
-	 * blob_ptr             --> struct blob_data {
-	 *                                  uint32_t version;
-	 * sub_ptr              -->         struct blob_data_v1 v1;
-	 * sub_ptr + sub_size   -->         struct blob_stuff more_stuff;
-	 * blob_ptr + blob_size --> };
-	 *
-	 * It's important to check the actual number of bytes from the start
-	 * of the sub-structure to the end of the blob data, and not just rely
-	 * on something like,
-	 *
-	 * sizeof(blob) - sizeof(blob->version) >= sizeof(sub-struct)
-	 *
-	 * This is because the start of the sub-structure can vary based on
-	 * how the compiler pads the overall structure.
-	 */
-	if (blob_ptr && sub_ptr)
-		/* return zero if end of blob >= end of sub-struct */
-		return ((unsigned char *)blob_ptr + blob_size) <
-			((unsigned char *)sub_ptr + sub_size);
-	return -EINVAL;
-}
-
-static void _sde_plane_setup_csc(struct sde_plane *psde,
-		struct sde_plane_state *pstate,
-		const struct sde_format *fmt)
+static inline void _sde_plane_setup_csc(struct sde_plane *psde)
 {
 	static const struct sde_csc_cfg sde_csc_YUV2RGB_601L = {
 		{
@@ -753,128 +748,44 @@
 		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
 		{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
 	};
-	static const struct sde_csc_cfg sde_csc_NOP = {
-		{
-			/* identity matrix, S15.16 format */
-			0x10000, 0x00000, 0x00000,
-			0x00000, 0x10000, 0x00000,
-			0x00000, 0x00000, 0x10000,
-		},
-		/* signed bias */
-		{ 0x0, 0x0, 0x0,},
-		{ 0x0, 0x0, 0x0,},
-		/* unsigned clamp */
-		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
-		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
-	};
-	struct sde_drm_csc *csc = NULL;
-	size_t csc_size = 0;
-	int i;
 
-	if (!psde || !pstate || !fmt) {
-		SDE_ERROR("invalid arguments\n");
+	if (!psde) {
+		SDE_ERROR("invalid plane\n");
 		return;
 	}
-	if (!psde->pipe_hw || !psde->pipe_hw->ops.setup_csc)
-		return;
-
-	/* check for user space override */
-	psde->csc_ptr = NULL;
-	csc = msm_property_get_blob(&psde->property_info,
-			pstate->property_blobs,
-			&csc_size,
-			PLANE_PROP_CSC);
-	if (csc) {
-		/* user space override */
-		memcpy(&psde->csc_cfg,
-				&sde_csc_NOP,
-				sizeof(struct sde_csc_cfg));
-		switch (csc->version) {
-		case SDE_DRM_CSC_V1:
-			if (!_sde_plane_verify_blob(csc,
-					csc_size,
-					&csc->v1,
-					sizeof(struct sde_drm_csc_v1))) {
-				for (i = 0; i < SDE_CSC_MATRIX_COEFF_SIZE; ++i)
-					psde->csc_cfg.csc_mv[i] =
-						csc->v1.ctm_coeff[i] >> 16;
-				for (i = 0; i < SDE_CSC_BIAS_SIZE; ++i) {
-					psde->csc_cfg.csc_pre_bv[i] =
-						csc->v1.pre_bias[i];
-					psde->csc_cfg.csc_post_bv[i] =
-						csc->v1.post_bias[i];
-				}
-				for (i = 0; i < SDE_CSC_CLAMP_SIZE; ++i) {
-					psde->csc_cfg.csc_pre_lv[i] =
-						csc->v1.pre_clamp[i];
-					psde->csc_cfg.csc_post_lv[i] =
-						csc->v1.post_clamp[i];
-				}
-				psde->csc_ptr = &psde->csc_cfg;
-			}
-			break;
-		default:
-			break;
-		}
-		if (!psde->csc_ptr)
-			SDE_ERROR("invalid csc blob, v%lld\n", csc->version);
-	}
 
 	/* revert to kernel default if override not available */
-	if (psde->csc_ptr)
-		SDE_DEBUG("user blob override for csc\n");
-	else if (SDE_FORMAT_IS_YUV(fmt))
+	if (psde->csc_usr_ptr)
+		psde->csc_ptr = psde->csc_usr_ptr;
+	else
 		psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc_YUV2RGB_601L;
+
+	SDE_DEBUG_PLANE(psde, "using 0x%X 0x%X 0x%X...\n",
+			psde->csc_ptr->csc_mv[0],
+			psde->csc_ptr->csc_mv[1],
+			psde->csc_ptr->csc_mv[2]);
 }
 
 static void _sde_plane_setup_scaler(struct sde_plane *psde,
 		const struct sde_format *fmt,
 		struct sde_plane_state *pstate)
 {
-	struct sde_hw_pixel_ext *pe = NULL;
-	struct sde_drm_scaler *sc_u = NULL;
-	struct sde_drm_scaler_v1 *sc_u1 = NULL;
-	size_t sc_u_size = 0;
+	struct sde_hw_pixel_ext *pe;
 	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
-	uint32_t tmp;
-	int i;
+	uint32_t tmp, i;
 
-	if (!psde || !fmt)
+	if (!psde || !fmt) {
+		SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+				psde != 0, fmt != 0, pstate != 0);
 		return;
+	}
 
 	pe = &(psde->pixel_ext);
-	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
 
-	/* get scaler config from user space */
-	if (pstate)
-		sc_u = msm_property_get_blob(&psde->property_info,
-				pstate->property_blobs,
-				&sc_u_size,
-				PLANE_PROP_SCALER);
-	if (sc_u) {
-		switch (sc_u->version) {
-		case SDE_DRM_SCALER_V1:
-			if (!_sde_plane_verify_blob(sc_u,
-						sc_u_size,
-						&sc_u->v1,
-						sizeof(*sc_u1)))
-				sc_u1 = &sc_u->v1;
-			break;
-		default:
-			SDE_DEBUG("unrecognized scaler blob v%lld\n",
-							sc_u->version);
-			break;
-		}
-	}
-
-	/* decimation */
-	if (sc_u1 && (sc_u1->enable & SDE_DRM_SCALER_DECIMATE)) {
-		psde->pipe_cfg.horz_decimation = sc_u1->horz_decimate;
-		psde->pipe_cfg.vert_decimation = sc_u1->vert_decimate;
-	} else {
-		psde->pipe_cfg.horz_decimation = 0;
-		psde->pipe_cfg.vert_decimation = 0;
-	}
+	psde->pipe_cfg.horz_decimation =
+		sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
+	psde->pipe_cfg.vert_decimation =
+		sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
 
 	/* don't chroma subsample if decimating */
 	chroma_subsmpl_h = psde->pipe_cfg.horz_decimation ? 1 :
@@ -884,9 +795,8 @@
 
 	/* update scaler */
 	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
-		if (sc_u1 && (sc_u1->enable & SDE_DRM_SCALER_SCALER_3))
-			SDE_DEBUG("SCALER3 blob detected\n");
-		else
+		if (!psde->pixel_ext_usr) {
+			/* calculate default config for QSEED3 */
 			_sde_plane_setup_scaler3(psde,
 					psde->pipe_cfg.src_rect.w,
 					psde->pipe_cfg.src_rect.h,
@@ -894,56 +804,23 @@
 					psde->pipe_cfg.dst_rect.h,
 					&psde->scaler3_cfg, fmt,
 					chroma_subsmpl_h, chroma_subsmpl_v);
-	} else {
-		/* always calculate basic scaler config */
-		if (sc_u1 && (sc_u1->enable & SDE_DRM_SCALER_SCALER_2)) {
-			/* populate from user space */
-			for (i = 0; i < SDE_MAX_PLANES; i++) {
-				pe->init_phase_x[i] = sc_u1->init_phase_x[i];
-				pe->phase_step_x[i] = sc_u1->phase_step_x[i];
-				pe->init_phase_y[i] = sc_u1->init_phase_y[i];
-				pe->phase_step_y[i] = sc_u1->phase_step_y[i];
-
-				pe->horz_filter[i] = sc_u1->horz_filter[i];
-				pe->vert_filter[i] = sc_u1->vert_filter[i];
-			}
-		} else {
-			/* calculate phase steps */
-			_sde_plane_setup_scaler2(psde,
-					psde->pipe_cfg.src_rect.w,
-					psde->pipe_cfg.dst_rect.w,
-					pe->phase_step_x,
-					pe->horz_filter, fmt, chroma_subsmpl_h);
-			_sde_plane_setup_scaler2(psde,
-					psde->pipe_cfg.src_rect.h,
-					psde->pipe_cfg.dst_rect.h,
-					pe->phase_step_y,
-					pe->vert_filter, fmt, chroma_subsmpl_v);
 		}
-	}
+	} else if (!psde->pixel_ext_usr) {
+		/* calculate default configuration for QSEED2 */
+		memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
 
-	/* update pixel extensions */
-	if (sc_u1 && (sc_u1->enable & SDE_DRM_SCALER_PIX_EXT)) {
-		/* populate from user space */
-		SDE_DEBUG("pixel ext blob detected\n");
-		for (i = 0; i < SDE_MAX_PLANES; i++) {
-			pe->num_ext_pxls_left[i] = sc_u1->lr.num_pxls_start[i];
-			pe->num_ext_pxls_right[i] = sc_u1->lr.num_pxls_end[i];
-			pe->left_ftch[i] = sc_u1->lr.ftch_start[i];
-			pe->right_ftch[i] = sc_u1->lr.ftch_end[i];
-			pe->left_rpt[i] = sc_u1->lr.rpt_start[i];
-			pe->right_rpt[i] = sc_u1->lr.rpt_end[i];
-			pe->roi_w[i] = sc_u1->lr.roi[i];
+		SDE_DEBUG_PLANE(psde, "default config\n");
+		_sde_plane_setup_scaler2(psde,
+				psde->pipe_cfg.src_rect.w,
+				psde->pipe_cfg.dst_rect.w,
+				pe->phase_step_x,
+				pe->horz_filter, fmt, chroma_subsmpl_h);
+		_sde_plane_setup_scaler2(psde,
+				psde->pipe_cfg.src_rect.h,
+				psde->pipe_cfg.dst_rect.h,
+				pe->phase_step_y,
+				pe->vert_filter, fmt, chroma_subsmpl_v);
 
-			pe->num_ext_pxls_top[i] = sc_u1->tb.num_pxls_start[i];
-			pe->num_ext_pxls_btm[i] = sc_u1->tb.num_pxls_end[i];
-			pe->top_ftch[i] = sc_u1->tb.ftch_start[i];
-			pe->btm_ftch[i] = sc_u1->tb.ftch_end[i];
-			pe->top_rpt[i] = sc_u1->tb.rpt_start[i];
-			pe->btm_rpt[i] = sc_u1->tb.rpt_end[i];
-			pe->roi_h[i] = sc_u1->tb.roi[i];
-		}
-	} else {
 		/* calculate left/right/top/bottom pixel extensions */
 		tmp = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.w,
 				psde->pipe_cfg.horz_decimation);
@@ -969,61 +846,51 @@
 
 		for (i = 0; i < SDE_MAX_PLANES; i++) {
 			if (pe->num_ext_pxls_left[i] >= 0)
-				pe->left_rpt[i] =
-					pe->num_ext_pxls_left[i];
+				pe->left_rpt[i] = pe->num_ext_pxls_left[i];
 			else
-				pe->left_ftch[i] =
-					pe->num_ext_pxls_left[i];
+				pe->left_ftch[i] = pe->num_ext_pxls_left[i];
 
 			if (pe->num_ext_pxls_right[i] >= 0)
-				pe->right_rpt[i] =
-					pe->num_ext_pxls_right[i];
+				pe->right_rpt[i] = pe->num_ext_pxls_right[i];
 			else
-				pe->right_ftch[i] =
-					pe->num_ext_pxls_right[i];
+				pe->right_ftch[i] = pe->num_ext_pxls_right[i];
 
 			if (pe->num_ext_pxls_top[i] >= 0)
-				pe->top_rpt[i] =
-					pe->num_ext_pxls_top[i];
+				pe->top_rpt[i] = pe->num_ext_pxls_top[i];
 			else
-				pe->top_ftch[i] =
-					pe->num_ext_pxls_top[i];
+				pe->top_ftch[i] = pe->num_ext_pxls_top[i];
 
 			if (pe->num_ext_pxls_btm[i] >= 0)
-				pe->btm_rpt[i] =
-					pe->num_ext_pxls_btm[i];
+				pe->btm_rpt[i] = pe->num_ext_pxls_btm[i];
 			else
-				pe->btm_ftch[i] =
-					pe->num_ext_pxls_btm[i];
+				pe->btm_ftch[i] = pe->num_ext_pxls_btm[i];
 		}
 	}
 }
 
 /**
  * _sde_plane_color_fill - enables color fill on plane
- * @plane:  Pointer to DRM plane object
+ * @psde:   Pointer to SDE plane object
  * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
  * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
  * Returns: 0 on success
  */
-static int _sde_plane_color_fill(struct drm_plane *plane,
+static int _sde_plane_color_fill(struct sde_plane *psde,
 		uint32_t color, uint32_t alpha)
 {
-	struct sde_plane *psde;
 	const struct sde_format *fmt;
 
-	if (!plane) {
+	if (!psde) {
 		SDE_ERROR("invalid plane\n");
 		return -EINVAL;
 	}
 
-	psde = to_sde_plane(plane);
 	if (!psde->pipe_hw) {
-		SDE_ERROR("invalid plane h/w pointer\n");
+		SDE_ERROR_PLANE(psde, "invalid plane h/w pointer\n");
 		return -EINVAL;
 	}
 
-	DBG("");
+	SDE_DEBUG_PLANE(psde, "\n");
 
 	/*
 	 * select fill format to match user property expectation,
@@ -1059,7 +926,7 @@
 static int _sde_plane_mode_set(struct drm_plane *plane,
 				struct drm_plane_state *state)
 {
-	uint32_t nplanes, src_flags, zpos, split_width;
+	uint32_t nplanes, src_flags;
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate;
 	const struct sde_format *fmt;
@@ -1067,9 +934,13 @@
 	struct drm_framebuffer *fb;
 	struct sde_rect src, dst;
 	bool q16_data = true;
+	int idx;
 
-	if (!plane || !plane->state) {
-		SDE_ERROR("invalid plane/state\n");
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return -EINVAL;
+	} else if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
 		return -EINVAL;
 	}
 
@@ -1079,93 +950,134 @@
 	crtc = state->crtc;
 	fb = state->fb;
 	if (!crtc || !fb) {
-		SDE_ERROR("invalid crtc/fb\n");
+		SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
+				crtc != 0, fb != 0);
 		return -EINVAL;
 	}
 	fmt = to_sde_format(msm_framebuffer_format(fb));
 	nplanes = fmt->num_planes;
 
-	psde->is_rt_pipe = _sde_plane_is_rt_pipe(plane, crtc);
-
-	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
-
-	POPULATE_RECT(&src, state->src_x, state->src_y,
-		state->src_w, state->src_h, q16_data);
-	POPULATE_RECT(&dst, state->crtc_x, state->crtc_y,
-		state->crtc_w, state->crtc_h, !q16_data);
-
-	SDE_DEBUG("%s:FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u, %s ubwc %d\n",
-			psde->pipe_name,
-			fb->base.id, src.x, src.y, src.w, src.h,
-			crtc->base.id, dst.x, dst.y, dst.w, dst.h,
-			drm_get_format_name(fmt->base.pixel_format),
-			SDE_FORMAT_IS_UBWC(fmt));
-
-	/* update format configuration */
-	memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
-	src_flags = 0;
-
-	/* flags */
-	SDE_DEBUG("flags 0x%llX, rotation 0x%llX\n",
-			sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG),
-			sde_plane_get_property(pstate, PLANE_PROP_ROTATION));
-	if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
-		BIT(DRM_REFLECT_X))
-		src_flags |= SDE_SSPP_FLIP_LR;
-	if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
-		BIT(DRM_REFLECT_Y))
-		src_flags |= SDE_SSPP_FLIP_UD;
-	if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
-		BIT(SDE_DRM_DEINTERLACE)) {
-		src.h /= 2;
-		src.y  = DIV_ROUND_UP(src.y, 2);
-		src.y &= ~0x1;
+	/* determine what needs to be refreshed */
+	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
+		switch (idx) {
+		case PLANE_PROP_SCALER_V1:
+		case PLANE_PROP_H_DECIMATE:
+		case PLANE_PROP_V_DECIMATE:
+		case PLANE_PROP_SRC_CONFIG:
+		case PLANE_PROP_ZPOS:
+			pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+			break;
+		case PLANE_PROP_CSC_V1:
+			pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
+			break;
+		case PLANE_PROP_COLOR_FILL:
+			/* potentially need to refresh everything */
+			pstate->dirty = SDE_PLANE_DIRTY_ALL;
+			break;
+		case PLANE_PROP_ROTATION:
+			pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
+			break;
+		case PLANE_PROP_INFO:
+		case PLANE_PROP_ALPHA:
+		case PLANE_PROP_INPUT_FENCE:
+		case PLANE_PROP_BLEND_OP:
+			/* no special action required */
+			break;
+		default:
+			/* unknown property, refresh everything */
+			pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+			SDE_ERROR("executing full mode set, prp_idx %d\n", idx);
+			break;
+		}
 	}
 
-	psde->pipe_cfg.src_rect = src;
-	psde->pipe_cfg.dst_rect = dst;
-
-	/* check for color fill */
-	psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
-			PLANE_PROP_COLOR_FILL);
-	if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
-		/* skip remaining processing on color fill */
-		return 0;
+	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
+		memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
 
 	_sde_plane_set_scanout(plane, pstate, &psde->pipe_cfg, fb);
 
-	_sde_plane_setup_scaler(psde, fmt, pstate);
+	/* early out if nothing dirty */
+	if (!pstate->dirty)
+		return 0;
+	pstate->pending = true;
 
-	/* base layer source split needs update */
-	zpos = sde_plane_get_property(pstate, PLANE_PROP_ZPOS);
-	if (zpos == SDE_STAGE_BASE) {
-		split_width = get_crtc_split_width(crtc);
-		if (psde->pipe_cfg.dst_rect.x >= split_width)
-			psde->pipe_cfg.dst_rect.x -= split_width;
+	psde->is_rt_pipe = _sde_plane_is_rt_pipe(plane, crtc);
+	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
+
+	/* update roi config */
+	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
+		POPULATE_RECT(&src, state->src_x, state->src_y,
+			state->src_w, state->src_h, q16_data);
+		POPULATE_RECT(&dst, state->crtc_x, state->crtc_y,
+			state->crtc_w, state->crtc_h, !q16_data);
+
+		SDE_DEBUG_PLANE(psde,
+			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %s ubwc %d\n",
+				fb->base.id, src.x, src.y, src.w, src.h,
+				crtc->base.id, dst.x, dst.y, dst.w, dst.h,
+				drm_get_format_name(fmt->base.pixel_format),
+				SDE_FORMAT_IS_UBWC(fmt));
+
+		if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
+			BIT(SDE_DRM_DEINTERLACE)) {
+			SDE_DEBUG_PLANE(psde, "deinterlace\n");
+			for (idx = 0; idx < SDE_MAX_PLANES; ++idx)
+				psde->pipe_cfg.layout.plane_pitch[idx] <<= 1;
+			src.h /= 2;
+			src.y  = DIV_ROUND_UP(src.y, 2);
+			src.y &= ~0x1;
+		}
+
+		psde->pipe_cfg.src_rect = src;
+		psde->pipe_cfg.dst_rect = dst;
+
+		/* check for color fill */
+		psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
+				PLANE_PROP_COLOR_FILL);
+		if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
+			/* skip remaining processing on color fill */
+			pstate->dirty = 0x0;
+		} else if (psde->pipe_hw->ops.setup_rects) {
+			_sde_plane_setup_scaler(psde, fmt, pstate);
+
+			psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
+					&psde->pipe_cfg, &psde->pixel_ext);
+		}
 	}
 
-	if (psde->pipe_hw->ops.setup_format)
-		psde->pipe_hw->ops.setup_format(psde->pipe_hw,
-				fmt, src_flags);
-	if (psde->pipe_hw->ops.setup_rects)
-		psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
-				&psde->pipe_cfg, &psde->pixel_ext);
+	if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
+			psde->pipe_hw->ops.setup_format) {
+		src_flags = 0x0;
+		SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n",
+			sde_plane_get_property(pstate, PLANE_PROP_ROTATION));
+		if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
+			BIT(DRM_REFLECT_X))
+			src_flags |= SDE_SSPP_FLIP_LR;
+		if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
+			BIT(DRM_REFLECT_Y))
+			src_flags |= SDE_SSPP_FLIP_UD;
+
+		/* update format */
+		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags);
+
+		/* update csc */
+		if (SDE_FORMAT_IS_YUV(fmt))
+			_sde_plane_setup_csc(psde);
+		else
+			psde->csc_ptr = 0;
+	}
 
 	/* update sharpening */
-	psde->sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
-	psde->sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
-	psde->sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
-	psde->sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
+	if ((pstate->dirty & SDE_PLANE_DIRTY_SHARPEN) &&
+		psde->pipe_hw->ops.setup_sharpening) {
+		psde->sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
+		psde->sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
+		psde->sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
+		psde->sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
 
-	if (psde->pipe_hw->ops.setup_sharpening)
 		psde->pipe_hw->ops.setup_sharpening(psde->pipe_hw,
-			&psde->sharp_cfg);
-
-	/* update csc */
-	if (SDE_FORMAT_IS_YUV(fmt))
-		_sde_plane_setup_csc(psde, pstate, fmt);
-	else
-		psde->csc_ptr = 0;
+				&psde->sharp_cfg);
+	}
 
 	_sde_plane_set_qos_lut(plane, fb);
 	_sde_plane_set_danger_lut(plane, fb);
@@ -1175,6 +1087,9 @@
 		_sde_plane_set_ot_limit(plane, crtc);
 	}
 
+	/* clear dirty */
+	pstate->dirty = 0x0;
+
 	return 0;
 }
 
@@ -1187,20 +1102,20 @@
 	if (!new_state->fb)
 		return 0;
 
-	SDE_DEBUG("%s: FB[%u]\n", psde->pipe_name, fb->base.id);
+	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
 	return msm_framebuffer_prepare(fb, psde->mmu_id);
 }
 
 static void sde_plane_cleanup_fb(struct drm_plane *plane,
 		const struct drm_plane_state *old_state)
 {
-	struct drm_framebuffer *fb = old_state->fb;
-	struct sde_plane *psde = to_sde_plane(plane);
+	struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
 
 	if (!fb)
 		return;
 
-	SDE_DEBUG("%s: FB[%u]\n", psde->pipe_name, fb->base.id);
+	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
 	msm_framebuffer_cleanup(fb, psde->mmu_id);
 }
 
@@ -1211,31 +1126,36 @@
 	struct sde_plane_state *pstate = to_sde_plane_state(state);
 
 	/* no need to check it again */
-	if (pstate->mode_changed)
+	if (pstate->dirty == SDE_PLANE_DIRTY_ALL)
 		return;
 
-	if (!(sde_plane_enabled(state) && sde_plane_enabled(old_state))) {
-		SDE_DEBUG("%s: pipe enabling/disabling full modeset required\n",
-			psde->pipe_name);
-		pstate->mode_changed = true;
+	if (!sde_plane_enabled(state) || !sde_plane_enabled(old_state)
+			|| psde->is_error) {
+		SDE_DEBUG_PLANE(psde,
+			"enabling/disabling full modeset required\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
 	} else if (to_sde_plane_state(old_state)->pending) {
-		SDE_DEBUG("%s: still pending\n", psde->pipe_name);
-		pstate->mode_changed = true;
+		SDE_DEBUG_PLANE(psde, "still pending\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
 	} else if (state->src_w != old_state->src_w ||
 		   state->src_h != old_state->src_h ||
 		   state->src_x != old_state->src_x ||
 		   state->src_y != old_state->src_y) {
-		SDE_DEBUG("%s: src rect updated\n", psde->pipe_name);
-		pstate->mode_changed = true;
+		SDE_DEBUG_PLANE(psde, "src rect updated\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
 	} else if (state->crtc_w != old_state->crtc_w ||
 		   state->crtc_h != old_state->crtc_h ||
 		   state->crtc_x != old_state->crtc_x ||
 		   state->crtc_y != old_state->crtc_y) {
-		SDE_DEBUG("%s: crtc rect updated\n", psde->pipe_name);
-		pstate->mode_changed = true;
+		SDE_DEBUG_PLANE(psde, "crtc rect updated\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+	}
+
+	if (!state->fb || !old_state->fb) {
+		SDE_DEBUG_PLANE(psde, "can't compare fb handles\n");
 	} else if (state->fb->pixel_format != old_state->fb->pixel_format) {
-		SDE_DEBUG("%s: format change!\n", psde->pipe_name);
-		pstate->mode_changed = true;
+		SDE_DEBUG_PLANE(psde, "format change\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_FORMAT | SDE_PLANE_DIRTY_RECTS;
 	} else {
 		uint64_t *new_mods = state->fb->modifier;
 		uint64_t *old_mods = old_state->fb->modifier;
@@ -1247,80 +1167,54 @@
 
 		for (i = 0; i < ARRAY_SIZE(state->fb->modifier); i++) {
 			if (new_mods[i] != old_mods[i]) {
-				SDE_DEBUG("%s: format modifiers change\"\
+				SDE_DEBUG_PLANE(psde,
+					"format modifiers change\"\
 					plane:%d new_mode:%llu old_mode:%llu\n",
-					psde->pipe_name, i, new_mods[i],
-					old_mods[i]);
-				pstate->mode_changed = true;
+					i, new_mods[i], old_mods[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
+					SDE_PLANE_DIRTY_RECTS;
 				break;
 			}
 		}
 		for (i = 0; i < ARRAY_SIZE(state->fb->pitches); i++) {
 			if (new_pitches[i] != old_pitches[i]) {
-				SDE_DEBUG("%s: pitches change plane:%d\"\
+				SDE_DEBUG_PLANE(psde,
+					"pitches change plane:%d\"\
 					old_pitches:%u new_pitches:%u\n",
-					psde->pipe_name, i, old_pitches[i],
-					new_pitches[i]);
-				pstate->mode_changed = true;
+					i, old_pitches[i], new_pitches[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
 				break;
 			}
 		}
 		for (i = 0; i < ARRAY_SIZE(state->fb->offsets); i++) {
 			if (new_offset[i] != old_offset[i]) {
-				SDE_DEBUG("%s: offset change plane:%d\"\
+				SDE_DEBUG_PLANE(psde,
+					"offset change plane:%d\"\
 					old_offset:%u new_offset:%u\n",
-					psde->pipe_name, i, old_offset[i],
-					new_offset[i]);
-				pstate->mode_changed = true;
+					i, old_offset[i], new_offset[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
+					SDE_PLANE_DIRTY_RECTS;
 				break;
 			}
 		}
 	}
 }
 
-static bool __get_scale_data(struct sde_plane *psde,
-	struct sde_plane_state *pstate, struct sde_drm_scaler *sc_u,
-	size_t *sc_u_size)
-{
-	bool valid_flag = false;
-
-	sc_u = msm_property_get_blob(&psde->property_info,
-			pstate->property_blobs,
-			sc_u_size,
-			PLANE_PROP_SCALER);
-	if (sc_u) {
-		switch (sc_u->version) {
-		case SDE_DRM_SCALER_V1:
-			if (!_sde_plane_verify_blob(sc_u, *sc_u_size,
-				&sc_u->v1, sizeof(struct sde_drm_scaler_v1)))
-				valid_flag = true;
-			break;
-		default:
-			SDE_DEBUG("unrecognized scaler blob v%lld\n",
-							sc_u->version);
-			break;
-		}
-	}
-
-	return valid_flag;
-}
-
 static int sde_plane_atomic_check(struct drm_plane *plane,
 		struct drm_plane_state *state)
 {
-	int ret = 0, valid_scale_data;
+	int ret = 0;
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate;
 	const struct sde_format *fmt;
-	size_t sc_u_size = 0;
-	struct sde_drm_scaler *sc_u = NULL;
 	struct sde_rect src, dst;
 	uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
 	uint32_t max_upscale, max_downscale, min_src_size, max_linewidth;
 	bool q16_data = true;
 
 	if (!plane || !state) {
-		SDE_ERROR("invalid plane/state\n");
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != 0, state != 0);
 		ret = -EINVAL;
 		goto exit;
 	}
@@ -1329,14 +1223,13 @@
 	pstate = to_sde_plane_state(state);
 
 	if (!psde->pipe_sblk) {
-		SDE_ERROR("invalid plane catalog\n");
+		SDE_ERROR_PLANE(psde, "invalid catalog\n");
 		ret = -EINVAL;
 		goto exit;
 	}
 
-	valid_scale_data = __get_scale_data(psde, pstate, sc_u, &sc_u_size);
-	deci_w = valid_scale_data && sc_u ? sc_u->v1.horz_decimate : 0;
-	deci_h = valid_scale_data && sc_u ? sc_u->v1.vert_decimate : 0;
+	deci_w = sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
+	deci_h = sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
 
 	/* src values are in Q16 fixed point, convert to integer */
 	POPULATE_RECT(&src, state->src_x, state->src_y, state->src_w,
@@ -1351,7 +1244,7 @@
 	max_downscale = psde->pipe_sblk->maxdwnscale;
 	max_linewidth = psde->pipe_sblk->maxlinewidth;
 
-	SDE_DEBUG("%s: check (%d -> %d)\n", psde->pipe_name,
+	SDE_DEBUG_PLANE(psde, "check %d -> %d\n",
 		sde_plane_enabled(plane->state), sde_plane_enabled(state));
 
 	if (!sde_plane_enabled(state))
@@ -1364,7 +1257,8 @@
 	if (SDE_FORMAT_IS_YUV(fmt) &&
 		(!(psde->features & SDE_SSPP_SCALER) ||
 		 !(psde->features & BIT(SDE_SSPP_CSC)))) {
-		SDE_ERROR("plane doesn't have scaler/csc capability for yuv\n");
+		SDE_ERROR_PLANE(psde,
+				"plane doesn't have scaler/csc for yuv\n");
 		ret = -EINVAL;
 
 	/* check src bounds */
@@ -1373,20 +1267,20 @@
 		src.w < min_src_size || src.h < min_src_size ||
 		CHECK_LAYER_BOUNDS(src.x, src.w, state->fb->width) ||
 		CHECK_LAYER_BOUNDS(src.y, src.h, state->fb->height)) {
-		SDE_ERROR("invalid source (%u, %u) -> (%u, %u)\n",
+		SDE_ERROR_PLANE(psde, "invalid source %u, %u, %ux%u\n",
 			src.x, src.y, src.w, src.h);
 		ret = -E2BIG;
 
 	/* valid yuv image */
 	} else if (SDE_FORMAT_IS_YUV(fmt) && ((src.x & 0x1) || (src.y & 0x1) ||
 			 (src.w & 0x1) || (src.h & 0x1))) {
-		SDE_ERROR("invalid yuv source (%u, %u) -> (%u, %u)\n",
+		SDE_ERROR_PLANE(psde, "invalid yuv source %u, %u, %ux%u\n",
 				src.x, src.y, src.w, src.h);
 		ret = -EINVAL;
 
 	/* min dst support */
 	} else if (dst.w < 0x1 || dst.h < 0x1) {
-		SDE_ERROR("invalid dest rect (%u, %u) -> (%u, %u)\n",
+		SDE_ERROR_PLANE(psde, "invalid dest rect %u, %u, %ux%u\n",
 				dst.x, dst.y, dst.w, dst.h);
 		ret = -EINVAL;
 
@@ -1394,22 +1288,26 @@
 	} else if (deci_w || deci_h) {
 		if ((deci_w > psde->pipe_sblk->maxhdeciexp) ||
 			(deci_h > psde->pipe_sblk->maxvdeciexp)) {
-			SDE_ERROR("too much decimation requested\n");
+			SDE_ERROR_PLANE(psde,
+					"too much decimation requested\n");
 			ret = -EINVAL;
 		} else if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
-			SDE_ERROR("decimation requires linear fetch\n");
+			SDE_ERROR_PLANE(psde,
+					"decimation requires linear fetch\n");
 			ret = -EINVAL;
 		}
 
 	} else if (!(psde->features & SDE_SSPP_SCALER) &&
 		((src.w != dst.w) || (src.h != dst.h))) {
-		SDE_ERROR("pipe doesn't support scaling %ux%u->%ux%u\n",
+		SDE_ERROR_PLANE(psde,
+			"pipe doesn't support scaling %ux%u->%ux%u\n",
 			src.w, src.h, dst.w, dst.h);
 		ret = -EINVAL;
 
 	/* check decimated source width */
 	} else if (src_deci_w > max_linewidth) {
-		SDE_ERROR("invalid source width:%u, deci wid:%u, line wid:%u\n",
+		SDE_ERROR_PLANE(psde,
+				"invalid src w:%u, deci w:%u, line w:%u\n",
 				src.w, src_deci_w, max_linewidth);
 		ret = -E2BIG;
 
@@ -1418,7 +1316,8 @@
 		((src_deci_h * max_upscale) < dst.h) ||
 		((dst.w * max_downscale) < src_deci_w) ||
 		((dst.h * max_downscale) < src_deci_h)) {
-		SDE_ERROR("too much scaling requested %ux%u -> %ux%u\n",
+		SDE_ERROR_PLANE(psde,
+			"too much scaling requested %ux%u->%ux%u\n",
 			src_deci_w, src_deci_h, dst.w, dst.h);
 		ret = -E2BIG;
 	}
@@ -1438,8 +1337,10 @@
 {
 	struct sde_plane *psde;
 
-	if (!plane)
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
 		return;
+	}
 
 	psde = to_sde_plane(plane);
 
@@ -1449,10 +1350,10 @@
 	 */
 	if (psde->is_error)
 		/* force white frame with 0% alpha pipe output on error */
-		_sde_plane_color_fill(plane, 0xFFFFFF, 0x0);
+		_sde_plane_color_fill(psde, 0xFFFFFF, 0x0);
 	else if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
 		/* force 100% alpha */
-		_sde_plane_color_fill(plane, psde->color_fill, 0xFF);
+		_sde_plane_color_fill(psde, psde->color_fill, 0xFF);
 	else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
 		psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
 
@@ -1464,41 +1365,40 @@
 static void sde_plane_atomic_update(struct drm_plane *plane,
 				struct drm_plane_state *old_state)
 {
-	struct sde_plane *sde_plane;
+	struct sde_plane *psde;
 	struct drm_plane_state *state;
 	struct sde_plane_state *pstate;
 
-	if (!plane || !plane->state) {
-		SDE_ERROR("invalid plane/state\n");
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	} else if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
 		return;
 	}
 
-	sde_plane = to_sde_plane(plane);
-	sde_plane->is_error = false;
+	psde = to_sde_plane(plane);
+	psde->is_error = false;
 	state = plane->state;
 	pstate = to_sde_plane_state(state);
 
-	SDE_DEBUG("%s: update\n", sde_plane->pipe_name);
+	SDE_DEBUG_PLANE(psde, "\n");
 
 	if (!sde_plane_enabled(state)) {
 		pstate->pending = true;
-	} else if (pstate->mode_changed) {
+	} else {
 		int ret;
 
-		pstate->pending = true;
 		ret = _sde_plane_mode_set(plane, state);
 		/* atomic_check should have ensured that this doesn't fail */
 		WARN_ON(ret < 0);
-	} else {
-		_sde_plane_set_scanout(plane, pstate,
-				&sde_plane->pipe_cfg, state->fb);
 	}
 }
 
 
 /* helper to install properties which are common to planes and crtcs */
 static void _sde_plane_install_properties(struct drm_plane *plane,
-	u32 max_blendstages)
+	struct sde_mdss_cfg *catalog)
 {
 	static const struct drm_prop_enum_list e_blend_op[] = {
 		{SDE_DRM_BLEND_OP_NOT_DEFINED,    "not_defined"},
@@ -1512,14 +1412,31 @@
 	const struct sde_format_extended *format_list;
 	struct sde_kms_info *info;
 	struct sde_plane *psde = to_sde_plane(plane);
+	int zpos_max = 255;
+	int zpos_def = 0;
 
-	if (!plane || !psde || !psde->pipe_hw || !psde->pipe_sblk) {
-		SDE_ERROR("Invalid argument(s)\n");
+	if (!plane || !psde) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	} else if (!psde->pipe_hw || !psde->pipe_sblk) {
+		SDE_ERROR("invalid plane, pipe_hw %d pipe_sblk %d\n",
+				psde->pipe_hw != 0, psde->pipe_sblk != 0);
+		return;
+	} else if (!catalog) {
+		SDE_ERROR("invalid catalog\n");
 		return;
 	}
 
-	msm_property_install_range(&psde->property_info, "zpos", 0x0, 0,
-		max_blendstages, SDE_STAGE_BASE, PLANE_PROP_ZPOS);
+	if (sde_is_custom_client()) {
+		if (catalog->mixer_count && catalog->mixer)
+			zpos_max = catalog->mixer[0].sblk->maxblendstages;
+	} else if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
+		/* reserve zpos == 0 for primary planes */
+		zpos_def = drm_plane_index(plane) + 1;
+	}
+
+	msm_property_install_range(&psde->property_info, "zpos",
+		0x0, 0, zpos_max, zpos_def, PLANE_PROP_ZPOS);
 
 	msm_property_install_range(&psde->property_info, "alpha",
 		0x0, 0, 255, 255, PLANE_PROP_ALPHA);
@@ -1528,6 +1445,28 @@
 	msm_property_install_range(&psde->property_info, "input_fence",
 		0x0, 0, INR_OPEN_MAX, 0, PLANE_PROP_INPUT_FENCE);
 
+	if (psde->pipe_sblk->maxhdeciexp) {
+		msm_property_install_range(&psde->property_info, "h_decimate",
+			0x0, 0, psde->pipe_sblk->maxhdeciexp, 0,
+			PLANE_PROP_H_DECIMATE);
+	}
+
+	if (psde->pipe_sblk->maxvdeciexp) {
+		msm_property_install_range(&psde->property_info, "v_decimate",
+				0x0, 0, psde->pipe_sblk->maxvdeciexp, 0,
+				PLANE_PROP_V_DECIMATE);
+	}
+
+	if (psde->features & SDE_SSPP_SCALER) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"scaler_v1", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V1);
+	}
+
+	if (psde->features & BIT(SDE_SSPP_CSC)) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"csc_v1", 0x0, 0, ~0, 0, PLANE_PROP_CSC_V1);
+	}
+
 	/* standard properties */
 	msm_property_install_rotation(&psde->property_info,
 		BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y), PLANE_PROP_ROTATION);
@@ -1542,17 +1481,11 @@
 		msm_property_install_range(&psde->property_info, "color_fill",
 				0, 0, 0xFFFFFFFF, 0, PLANE_PROP_COLOR_FILL);
 
-	if (psde->features & SDE_SSPP_SCALER)
-		msm_property_install_blob(&psde->property_info, "scaler", 0,
-			PLANE_PROP_SCALER);
-
-	if (psde->features & BIT(SDE_SSPP_CSC))
-		msm_property_install_blob(&psde->property_info, "csc", 0,
-			PLANE_PROP_CSC);
-
 	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
-	if (!info)
+	if (!info) {
+		SDE_ERROR("failed to allocate info memory\n");
 		return;
+	}
 
 	msm_property_install_blob(&psde->property_info, "capabilities",
 		DRM_MODE_PROP_IMMUTABLE, PLANE_PROP_INFO);
@@ -1586,22 +1519,112 @@
 	kfree(info);
 }
 
+static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr)
+{
+	struct sde_drm_csc_v1 csc_v1;
+	int i;
+
+	if (!psde) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	psde->csc_usr_ptr = NULL;
+	if (!usr_ptr) {
+		SDE_DEBUG_PLANE(psde, "csc data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&csc_v1, usr_ptr, sizeof(csc_v1))) {
+		SDE_ERROR_PLANE(psde, "failed to copy csc data\n");
+		return;
+	}
+
+	/* populate from user space */
+	for (i = 0; i < SDE_CSC_MATRIX_COEFF_SIZE; ++i)
+		psde->csc_cfg.csc_mv[i] = csc_v1.ctm_coeff[i] >> 16;
+	for (i = 0; i < SDE_CSC_BIAS_SIZE; ++i) {
+		psde->csc_cfg.csc_pre_bv[i] = csc_v1.pre_bias[i];
+		psde->csc_cfg.csc_post_bv[i] = csc_v1.post_bias[i];
+	}
+	for (i = 0; i < SDE_CSC_CLAMP_SIZE; ++i) {
+		psde->csc_cfg.csc_pre_lv[i] = csc_v1.pre_clamp[i];
+		psde->csc_cfg.csc_post_lv[i] = csc_v1.post_clamp[i];
+	}
+	psde->csc_usr_ptr = &psde->csc_cfg;
+}
+
+static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr)
+{
+	struct sde_drm_scaler_v1 scale_v1;
+	struct sde_hw_pixel_ext *pe;
+	int i;
+
+	if (!psde) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	psde->pixel_ext_usr = false;
+	if (!usr) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&scale_v1, usr, sizeof(scale_v1))) {
+		SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
+		return;
+	}
+
+	/* populate from user space */
+	pe = &(psde->pixel_ext);
+	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		pe->init_phase_x[i] = scale_v1.init_phase_x[i];
+		pe->phase_step_x[i] = scale_v1.phase_step_x[i];
+		pe->init_phase_y[i] = scale_v1.init_phase_y[i];
+		pe->phase_step_y[i] = scale_v1.phase_step_y[i];
+
+		pe->horz_filter[i] = scale_v1.horz_filter[i];
+		pe->vert_filter[i] = scale_v1.vert_filter[i];
+	}
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		pe->num_ext_pxls_left[i] = scale_v1.lr.num_pxls_start[i];
+		pe->num_ext_pxls_right[i] = scale_v1.lr.num_pxls_end[i];
+		pe->left_ftch[i] = scale_v1.lr.ftch_start[i];
+		pe->right_ftch[i] = scale_v1.lr.ftch_end[i];
+		pe->left_rpt[i] = scale_v1.lr.rpt_start[i];
+		pe->right_rpt[i] = scale_v1.lr.rpt_end[i];
+		pe->roi_w[i] = scale_v1.lr.roi[i];
+
+		pe->num_ext_pxls_top[i] = scale_v1.tb.num_pxls_start[i];
+		pe->num_ext_pxls_btm[i] = scale_v1.tb.num_pxls_end[i];
+		pe->top_ftch[i] = scale_v1.tb.ftch_start[i];
+		pe->btm_ftch[i] = scale_v1.tb.ftch_end[i];
+		pe->top_rpt[i] = scale_v1.tb.rpt_start[i];
+		pe->btm_rpt[i] = scale_v1.tb.rpt_end[i];
+		pe->roi_h[i] = scale_v1.tb.roi[i];
+	}
+	psde->pixel_ext_usr = true;
+
+	SDE_DEBUG_PLANE(psde, "user property data copied\n");
+}
+
 static int sde_plane_atomic_set_property(struct drm_plane *plane,
 		struct drm_plane_state *state, struct drm_property *property,
 		uint64_t val)
 {
-	struct sde_plane *psde;
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
 	struct sde_plane_state *pstate;
 	int idx, ret = -EINVAL;
 
-	DBG("");
+	SDE_DEBUG_PLANE(psde, "\n");
 
 	if (!plane) {
 		SDE_ERROR("invalid plane\n");
 	} else if (!state) {
-		SDE_ERROR("invalid state\n");
+		SDE_ERROR_PLANE(psde, "invalid state\n");
 	} else {
-		psde = to_sde_plane(plane);
 		pstate = to_sde_plane_state(state);
 		ret = msm_property_atomic_set(&psde->property_info,
 				pstate->property_values, pstate->property_blobs,
@@ -1609,8 +1632,20 @@
 		if (!ret) {
 			idx = msm_property_index(&psde->property_info,
 					property);
-			if (idx == PLANE_PROP_INPUT_FENCE)
-				_sde_plane_set_input_fence(plane, pstate, val);
+			switch (idx) {
+			case PLANE_PROP_INPUT_FENCE:
+				_sde_plane_set_input_fence(psde, pstate, val);
+				break;
+			case PLANE_PROP_CSC_V1:
+				_sde_plane_set_csc_v1(psde, (void *)val);
+				break;
+			case PLANE_PROP_SCALER_V1:
+				_sde_plane_set_scaler_v1(psde, (void *)val);
+				break;
+			default:
+				/* nothing to do */
+				break;
+			}
 		}
 	}
 
@@ -1620,7 +1655,7 @@
 static int sde_plane_set_property(struct drm_plane *plane,
 		struct drm_property *property, uint64_t val)
 {
-	DBG("");
+	SDE_DEBUG("\n");
 
 	return sde_plane_atomic_set_property(plane,
 			plane->state, property, val);
@@ -1630,18 +1665,16 @@
 		const struct drm_plane_state *state,
 		struct drm_property *property, uint64_t *val)
 {
-	struct sde_plane *psde;
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
 	struct sde_plane_state *pstate;
 	int ret = -EINVAL;
 
-	DBG("");
-
 	if (!plane) {
 		SDE_ERROR("invalid plane\n");
 	} else if (!state) {
 		SDE_ERROR("invalid state\n");
 	} else {
-		psde = to_sde_plane(plane);
+		SDE_DEBUG_PLANE(psde, "\n");
 		pstate = to_sde_plane_state(state);
 		ret = msm_property_atomic_get(&psde->property_info,
 				pstate->property_values, pstate->property_blobs,
@@ -1653,13 +1686,11 @@
 
 static void sde_plane_destroy(struct drm_plane *plane)
 {
-	struct sde_plane *psde;
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
 
-	DBG("");
+	SDE_DEBUG_PLANE(psde, "\n");
 
-	if (plane) {
-		psde = to_sde_plane(plane);
-
+	if (psde) {
 		_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
 
 		debugfs_remove_recursive(psde->debugfs_root);
@@ -1688,14 +1719,15 @@
 	struct sde_plane_state *pstate;
 
 	if (!plane || !state) {
-		SDE_ERROR("invalid plane/state\n");
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != 0, state != 0);
 		return;
 	}
 
 	psde = to_sde_plane(plane);
 	pstate = to_sde_plane_state(state);
 
-	DBG("");
+	SDE_DEBUG_PLANE(psde, "\n");
 
 	/* remove ref count for frame buffers */
 	if (state->fb)
@@ -1716,17 +1748,25 @@
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate;
 	struct sde_plane_state *old_state;
+	uint64_t input_fence_default;
 
-	if (!plane || !plane->state)
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
 		return NULL;
+	} else if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
+		return NULL;
+	}
 
 	old_state = to_sde_plane_state(plane->state);
 	psde = to_sde_plane(plane);
 	pstate = msm_property_alloc_state(&psde->property_info);
-	if (!pstate)
+	if (!pstate) {
+		SDE_ERROR_PLANE(psde, "failed to allocate state\n");
 		return NULL;
+	}
 
-	DBG("");
+	SDE_DEBUG_PLANE(psde, "\n");
 
 	/* duplicate value helper */
 	msm_property_duplicate_state(&psde->property_info, old_state, pstate,
@@ -1736,14 +1776,14 @@
 	if (pstate->base.fb)
 		drm_framebuffer_reference(pstate->base.fb);
 
-	/* add ref count for fence */
-	if (pstate->input_fence) {
-		pstate->input_fence = 0;
-		_sde_plane_set_input_fence(plane, pstate, pstate->
-				property_values[PLANE_PROP_INPUT_FENCE]);
-	}
+	/* clear out any input fence */
+	pstate->input_fence = 0;
+	input_fence_default = msm_property_get_default(
+			&psde->property_info, PLANE_PROP_INPUT_FENCE);
+	msm_property_set_property(&psde->property_info, pstate->property_values,
+			PLANE_PROP_INPUT_FENCE, input_fence_default);
 
-	pstate->mode_changed = false;
+	pstate->dirty = 0x0;
 	pstate->pending = false;
 
 	return &pstate->base;
@@ -1760,7 +1800,7 @@
 	}
 
 	psde = to_sde_plane(plane);
-	SDE_DEBUG("%s\n", psde->pipe_name);
+	SDE_DEBUG_PLANE(psde, "\n");
 
 	/* remove previous state, if present */
 	if (plane->state) {
@@ -1769,8 +1809,10 @@
 	}
 
 	pstate = msm_property_alloc_state(&psde->property_info);
-	if (!pstate)
+	if (!pstate) {
+		SDE_ERROR_PLANE(psde, "failed to allocate state\n");
 		return;
+	}
 
 	/* reset value helper */
 	msm_property_reset_state(&psde->property_info, pstate,
@@ -1802,9 +1844,7 @@
 
 enum sde_sspp sde_plane_pipe(struct drm_plane *plane)
 {
-	struct sde_plane *sde_plane = to_sde_plane(plane);
-
-	return sde_plane->pipe;
+	return plane ? to_sde_plane(plane)->pipe : SSPP_NONE;
 }
 
 static void _sde_plane_init_debugfs(struct sde_plane *psde, struct sde_kms *kms)
@@ -1831,14 +1871,14 @@
 			sde_debugfs_setup_regset32(&psde->debugfs_src,
 					sblk->src_blk.base + cfg->base,
 					sblk->src_blk.len,
-					kms->mmio);
+					kms);
 			sde_debugfs_create_regset32("src_blk", 0444,
 					psde->debugfs_root, &psde->debugfs_src);
 
 			sde_debugfs_setup_regset32(&psde->debugfs_scaler,
 					sblk->scaler_blk.base + cfg->base,
 					sblk->scaler_blk.len,
-					kms->mmio);
+					kms);
 			sde_debugfs_create_regset32("scaler_blk", 0444,
 					psde->debugfs_root,
 					&psde->debugfs_scaler);
@@ -1846,7 +1886,7 @@
 			sde_debugfs_setup_regset32(&psde->debugfs_csc,
 					sblk->csc_blk.base + cfg->base,
 					sblk->csc_blk.len,
-					kms->mmio);
+					kms);
 			sde_debugfs_create_regset32("csc_blk", 0444,
 					psde->debugfs_root, &psde->debugfs_csc);
 		}
@@ -1855,14 +1895,15 @@
 
 /* initialize plane */
 struct drm_plane *sde_plane_init(struct drm_device *dev,
-		uint32_t pipe, bool primary_plane)
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs)
 {
 	struct drm_plane *plane = NULL;
 	struct sde_plane *psde;
 	struct msm_drm_private *priv;
 	struct sde_kms *kms;
 	enum drm_plane_type type;
-	int ret = -EINVAL, max_blendstages = 255;
+	int ret = -EINVAL;
 
 	if (!dev) {
 		SDE_ERROR("[%u]device is NULL\n", pipe);
@@ -1914,13 +1955,10 @@
 	psde->features = psde->pipe_hw->cap->features;
 	psde->pipe_sblk = psde->pipe_hw->cap->sblk;
 	if (!psde->pipe_sblk) {
-		SDE_ERROR("invalid sblk on pipe %d\n", pipe);
+		SDE_ERROR("[%u]invalid sblk\n", pipe);
 		goto clean_sspp;
 	}
 
-	if (kms->catalog && kms->catalog->mixer_count && kms->catalog->mixer)
-		max_blendstages = kms->catalog->mixer[0].sblk->maxblendstages;
-
 	/* add plane to DRM framework */
 	psde->nformats = sde_populate_formats(psde->pipe_sblk->format_list,
 			psde->formats,
@@ -1938,9 +1976,8 @@
 		type = DRM_PLANE_TYPE_PRIMARY;
 	else
 		type = DRM_PLANE_TYPE_OVERLAY;
-	ret = drm_universal_plane_init(dev, plane, 0xff, &sde_plane_funcs,
-				psde->formats, psde->nformats,
-				type);
+	ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+			&sde_plane_funcs, psde->formats, psde->nformats, type);
 	if (ret)
 		goto clean_sspp;
 
@@ -1952,7 +1989,7 @@
 			PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT,
 			sizeof(struct sde_plane_state));
 
-	_sde_plane_install_properties(plane, max_blendstages);
+	_sde_plane_install_properties(plane, kms->catalog);
 
 	/* save user friendly pipe name for later */
 	snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id);
@@ -1961,7 +1998,7 @@
 
 	_sde_plane_init_debugfs(psde, kms);
 
-	DRM_INFO("[%u]successfully created %s\n", pipe, psde->pipe_name);
+	DRM_INFO("%s created for pipe %u\n", psde->pipe_name, pipe);
 	return plane;
 
 clean_sspp:
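
Note: the sde_plane.c property rework above drops the "scaler" and "csc" blob properties in favor of volatile range properties ("scaler_v1", "csc_v1") whose 64-bit value the driver interprets as a pointer into the caller's address space and copies with copy_from_user(). A minimal userspace-side sketch of driving the new csc_v1 property through the libdrm atomic API could look as follows; the fd, plane_id and prop_id arguments and the zero-filled payload are placeholders, and the uapi header name is assumed:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm/sde_drm.h>	/* struct sde_drm_csc_v1 (uapi header, assumed) */

static int example_set_csc_v1(int fd, uint32_t plane_id, uint32_t prop_id)
{
	struct sde_drm_csc_v1 csc;
	drmModeAtomicReqPtr req;
	int ret;

	memset(&csc, 0, sizeof(csc));	/* fill ctm_coeff/bias/clamp as needed */

	req = drmModeAtomicAlloc();
	if (!req)
		return -1;

	/* the driver copy_from_user()s this pointer in its set_property path */
	drmModeAtomicAddProperty(req, plane_id, prop_id,
			(uint64_t)(uintptr_t)&csc);

	ret = drmModeAtomicCommit(fd, req, 0, NULL);
	drmModeAtomicFree(req);
	return ret;
}
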
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index d4712ef..b17ac82 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -342,6 +342,7 @@
 	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
 		rc = PTR_ERR(rm->hw_mdp);
 		rm->hw_mdp = NULL;
+		SDE_ERROR("failed: mdp hw not available\n");
 		goto fail;
 	}
 
@@ -356,8 +357,10 @@
 
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_LM,
 				cat->mixer[i].id, &cat->mixer[i]);
-		if (rc)
+		if (rc) {
+			SDE_ERROR("failed: lm hw not available\n");
 			goto fail;
+		}
 
 		if (!rm->lm_max_width) {
 			rm->lm_max_width = lm->sblk->maxwidth;
@@ -375,15 +378,19 @@
 	for (i = 0; i < cat->dspp_count; i++) {
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSPP,
 				cat->dspp[i].id, &cat->dspp[i]);
-		if (rc)
+		if (rc) {
+			SDE_ERROR("failed: dspp hw not available\n");
 			goto fail;
+		}
 	}
 
 	for (i = 0; i < cat->pingpong_count; i++) {
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_PINGPONG,
 				cat->pingpong[i].id, &cat->pingpong[i]);
-		if (rc)
+		if (rc) {
+			SDE_ERROR("failed: pp hw not available\n");
 			goto fail;
+		}
 	}
 
 	for (i = 0; i < cat->intf_count; i++) {
@@ -394,35 +401,42 @@
 
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_INTF,
 				cat->intf[i].id, &cat->intf[i]);
-		if (rc)
+		if (rc) {
+			SDE_ERROR("failed: intf hw not available\n");
 			goto fail;
+		}
 	}
 
 	for (i = 0; i < cat->wb_count; i++) {
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_WB,
 				cat->wb[i].id, &cat->wb[i]);
-		if (rc)
+		if (rc) {
+			SDE_ERROR("failed: wb hw not available\n");
 			goto fail;
+		}
 	}
 
 	for (i = 0; i < cat->ctl_count; i++) {
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CTL,
 				cat->ctl[i].id, &cat->ctl[i]);
-		if (rc)
+		if (rc) {
+			SDE_ERROR("failed: ctl hw not available\n");
 			goto fail;
+		}
 	}
 
 	for (i = 0; i < cat->cdm_count; i++) {
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CDM,
 				cat->cdm[i].id, &cat->cdm[i]);
-		if (rc)
+		if (rc) {
+			SDE_ERROR("failed: cdm hw not available\n");
 			goto fail;
+		}
 	}
 
 	return 0;
 
 fail:
-	SDE_ERROR("failed to init sde kms resources\n");
 	sde_rm_destroy(rm);
 
 	return rc;
@@ -984,7 +998,8 @@
  */
 void _sde_rm_release_rsvp(
 		struct sde_rm *rm,
-		struct sde_rm_rsvp *rsvp)
+		struct sde_rm_rsvp *rsvp,
+		struct drm_connector *conn)
 {
 	struct sde_rm_rsvp *rsvp_c, *rsvp_n;
 	struct sde_rm_hw_blk *blk;
@@ -1020,6 +1035,12 @@
 	}
 
 	kfree(rsvp);
+
+	(void) msm_property_set_property(
+			sde_connector_get_propinfo(conn),
+			sde_connector_get_property_values(conn->state),
+			CONNECTOR_PROP_TOPOLOGY_NAME,
+			SDE_RM_TOPOLOGY_UNKNOWN);
 }
 
 void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
@@ -1054,12 +1075,7 @@
 	} else {
 		SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
 				rsvp->enc_id);
-		_sde_rm_release_rsvp(rm, rsvp);
-		(void) msm_property_set_property(
-				sde_connector_get_propinfo(conn),
-				sde_connector_get_property_values(conn->state),
-				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
+		_sde_rm_release_rsvp(rm, rsvp, conn);
 	}
 }
 
@@ -1078,7 +1094,7 @@
 			CONNECTOR_PROP_TOPOLOGY_NAME,
 			rsvp->topology);
 	if (ret)
-		_sde_rm_release_rsvp(rm, rsvp);
+		_sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
 
 	/* Swap next rsvp to be the active */
 	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
@@ -1168,7 +1184,7 @@
 	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
 		SDE_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
 				rsvp_cur->seq, rsvp_cur->enc_id);
-		_sde_rm_release_rsvp(rm, rsvp_cur);
+		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
 		rsvp_cur = NULL;
 		_sde_rm_print_rsvps(rm, "post_clear");
 	}
@@ -1181,7 +1197,7 @@
 
 	if (ret) {
 		SDE_ERROR("failed to reserve hw resources: %d\n", ret);
-		_sde_rm_release_rsvp(rm, rsvp_nxt);
+		_sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
 	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
 		/*
 		 * Normally, if test_only, test the reservation and then undo
@@ -1190,13 +1206,13 @@
 		 */
 		SDE_DEBUG("test_only: discard test rsvp[s%de%d]\n",
 				rsvp_nxt->seq, rsvp_nxt->enc_id);
-		_sde_rm_release_rsvp(rm, rsvp_nxt);
+		_sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
 	} else {
 		if (test_only && RM_RQ_LOCK(&reqs))
 			SDE_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
 					rsvp_nxt->seq, rsvp_nxt->enc_id);
 
-		_sde_rm_release_rsvp(rm, rsvp_cur);
+		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
 
 		ret = _sde_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 98e6955..855b12c 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -12,8 +12,8 @@
  *
  */
 
-#ifndef __SDE_KMS_RM_H__
-#define __SDE_KMS_RM_H__
+#ifndef __SDE_RM_H__
+#define __SDE_RM_H__
 
 #include <linux/list.h>
 
@@ -198,4 +198,4 @@
  */
 int sde_rm_check_property_topctl(uint64_t val);
 
-#endif /* __sde_kms_rm_H__ */
+#endif /* __SDE_RM_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
new file mode 100644
index 0000000..6060bde
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -0,0 +1,209 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "sde_vbif.h"
+#include "sde_hw_vbif.h"
+#include "sde_trace.h"
+
+/**
+ * _sde_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif:	Pointer to hardware vbif driver
+ * @xin_id:	Client interface identifier
+ * @return:	0 if success; error code otherwise
+ */
+static int _sde_vbif_wait_for_xin_halt(struct sde_hw_vbif *vbif, u32 xin_id)
+{
+	ktime_t timeout;
+	bool status;
+	int rc;
+
+	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		return -EINVAL;
+	}
+
+	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+	for (;;) {
+		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+		if (status)
+			break;
+		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+			break;
+		}
+		usleep_range(501, 1000);
+	}
+
+	if (!status) {
+		rc = -ETIMEDOUT;
+		SDE_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+				vbif->idx - VBIF_0, xin_id);
+	} else {
+		rc = 0;
+		SDE_DEBUG("VBIF %d client %d is halted\n",
+				vbif->idx - VBIF_0, xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * _sde_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @ot_lim:	Pointer to OT limit to be modified
+ * @params:	Pointer to usecase parameters
+ */
+static void _sde_vbif_apply_dynamic_ot_limit(struct sde_hw_vbif *vbif,
+		u32 *ot_lim, struct sde_vbif_set_ot_params *params)
+{
+	u64 pps;
+	const struct sde_vbif_dynamic_ot_tbl *tbl;
+	u32 i;
+
+	if (!vbif || !(vbif->cap->features & BIT(SDE_VBIF_QOS_OTLIM)))
+		return;
+
+	/* Dynamic OT setting done only for WFD */
+	if (!params->is_wfd)
+		return;
+
+	pps = params->frame_rate;
+	pps *= params->width;
+	pps *= params->height;
+
+	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+			&vbif->cap->dynamic_ot_wr_tbl;
+
+	for (i = 0; i < tbl->count; i++) {
+		if (pps <= tbl->cfg[i].pps) {
+			*ot_lim = tbl->cfg[i].ot_limit;
+			break;
+		}
+	}
+
+	SDE_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+			vbif->idx - VBIF_0, params->xin_id,
+			params->width, params->height, params->frame_rate,
+			pps, *ot_lim);
+}
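+
+/*
+ * Worked example for the dynamic OT lookup above (illustrative values):
+ * a 1920x1080 at 30 fps WFD write client gives
+ * pps = 30 * 1920 * 1080 = 62,208,000. The first dynamic_ot_wr_tbl entry
+ * whose pps threshold is >= 62,208,000 supplies the OT limit; if no entry
+ * matches, ot_lim keeps the default limit chosen by the caller.
+ */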
+
+/**
+ * _sde_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @params:	Pointer to usecase parameters
+ * @return:	OT limit
+ */
+static u32 _sde_vbif_get_ot_limit(struct sde_hw_vbif *vbif,
+	struct sde_vbif_set_ot_params *params)
+{
+	u32 ot_lim = 0;
+	u32 val;
+
+	if (!vbif || !vbif->cap) {
+		SDE_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+		return 0;
+	}
+
+	if (vbif->cap->default_ot_wr_limit && !params->rd)
+		ot_lim = vbif->cap->default_ot_wr_limit;
+	else if (vbif->cap->default_ot_rd_limit && params->rd)
+		ot_lim = vbif->cap->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt/catalog,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	_sde_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	if (vbif && vbif->ops.get_limit_conf) {
+		val = vbif->ops.get_limit_conf(vbif,
+				params->xin_id, params->rd);
+		if (val == ot_lim)
+			ot_lim = 0;
+	}
+
+exit:
+	SDE_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+			vbif->idx - VBIF_0, params->xin_id, ot_lim);
+	return ot_lim;
+}
+
+/**
+ * sde_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @sde_kms:	SDE handler
+ * @params:	Pointer to usecase parameters
+ *
+ * Note: this function blocks while waiting for the VBIF client to halt.
+ */
+void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
+		struct sde_vbif_set_ot_params *params)
+{
+	struct sde_hw_vbif *vbif = NULL;
+	struct sde_hw_mdp *mdp;
+	bool forced_on = false;
+	u32 ot_lim;
+	int ret, i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = sde_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		if (sde_kms->hw_vbif[i] &&
+				sde_kms->hw_vbif[i]->idx == params->vbif_idx)
+			vbif = sde_kms->hw_vbif[i];
+	}
+
+	if (!vbif || !mdp) {
+		SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
+				vbif != 0, mdp != 0);
+		return;
+	}
+
+	if (!mdp->ops.setup_clk_force_ctrl ||
+			!vbif->ops.set_limit_conf ||
+			!vbif->ops.set_halt_ctrl)
+		return;
+
+	ot_lim = _sde_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_sde_perf_set_ot(params->num, params->xin_id, ot_lim,
+		params->vbif_idx);
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+	ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
+	if (ret)
+		MSM_EVT(sde_kms->dev, vbif->idx, params->xin_id);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+	return;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
new file mode 100644
index 0000000..703befa
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_VBIF_H__
+#define __SDE_VBIF_H__
+
+#include "sde_kms.h"
+
+struct sde_vbif_set_ot_params {
+	u32 xin_id;		/* VBIF client interface (xin) id */
+	u32 num;		/* block instance number, used for tracing */
+	u32 width;		/* usecase width in pixels */
+	u32 height;		/* usecase height in pixels */
+	u32 frame_rate;		/* usecase frame rate in fps */
+	bool rd;		/* true for a read (fetch) client */
+	bool is_wfd;		/* true for WFD; enables dynamic OT lookup */
+	u32 vbif_idx;		/* VBIF hw block index */
+	u32 clk_ctrl;		/* clock control index for force-on */
+};
+
+/**
+ * sde_vbif_set_ot_limit - set OT limit for vbif client
+ * @sde_kms:	SDE handler
+ * @params:	Pointer to OT configuration parameters
+ */
+void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
+		struct sde_vbif_set_ot_params *params);
+
+#endif /* __SDE_VBIF_H__ */
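
A minimal usage sketch for the API above (not part of the patch): the xin id, VBIF index and clock-control index would normally come from the SDE catalog. VBIF_NRT and SDE_CLK_CTRL_WB2 below are assumed catalog identifiers, and the 1080p30 writeback parameters are placeholders.

static void example_wb_ot_config(struct sde_kms *sde_kms)
{
	struct sde_vbif_set_ot_params params;

	memset(&params, 0, sizeof(params));
	params.xin_id = 6;			/* assumed writeback xin id */
	params.num = 0;				/* instance number, for tracing */
	params.width = 1920;
	params.height = 1080;
	params.frame_rate = 30;
	params.rd = false;			/* write client */
	params.is_wfd = true;			/* enable dynamic OT lookup */
	params.vbif_idx = VBIF_NRT;		/* assumed VBIF block index */
	params.clk_ctrl = SDE_CLK_CTRL_WB2;	/* assumed clock control index */

	/* Silently returns if the resolved OT limit is 0 or already applied */
	sde_vbif_set_ot_limit(sde_kms, &params);
}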
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 7385086..e2e8e60 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -90,7 +90,7 @@
 	supply_root_node = of_get_child_by_name(of_node,
 						"qcom,platform-supply-entries");
 	if (!supply_root_node) {
-		pr_err("no supply entry present\n");
+		pr_debug("no supply entry present\n");
 		return rc;
 	}
 
@@ -220,29 +220,33 @@
 {
 	u32 i = 0, rc = 0;
 	const char *clock_name;
-	u32 clock_rate;
+	u32 clock_rate = 0;
+	u32 clock_max_rate = 0;
+	int num_clk = 0;
 
 	if (!pdev || !mp) {
 		pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
 		return -EINVAL;
 	}
 
-	mp->num_clk = of_property_count_strings(pdev->dev.of_node,
+	mp->num_clk = 0;
+	num_clk = of_property_count_strings(pdev->dev.of_node,
 							"clock-names");
-	if (mp->num_clk <= 0) {
-		pr_err("clocks are not defined\n");
+	if (num_clk <= 0) {
+		pr_debug("clocks are not defined\n");
 		goto clk_err;
 	}
 
+	mp->num_clk = num_clk;
 	mp->clk_config = devm_kzalloc(&pdev->dev,
-			sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL);
+			sizeof(struct dss_clk) * num_clk, GFP_KERNEL);
 	if (!mp->clk_config) {
 		rc = -ENOMEM;
 		mp->num_clk = 0;
 		goto clk_err;
 	}
 
-	for (i = 0; i < mp->num_clk; i++) {
+	for (i = 0; i < num_clk; i++) {
 		of_property_read_string_index(pdev->dev.of_node, "clock-names",
 							i, &clock_name);
 		strlcpy(mp->clk_config[i].clk_name, clock_name,
@@ -256,6 +260,11 @@
 			mp->clk_config[i].type = DSS_CLK_AHB;
 		else
 			mp->clk_config[i].type = DSS_CLK_PCLK;
+
+		clock_max_rate = 0;
+		of_property_read_u32_index(pdev->dev.of_node, "clock-max-rate",
+							i, &clock_max_rate);
+		mp->clk_config[i].max_rate = clock_max_rate;
 	}
 
 clk_err:
@@ -401,7 +410,7 @@
 {
 	struct dss_module_power *mp;
 
-	if (!phandle) {
+	if (!phandle || !pdev) {
 		pr_err("invalid input param\n");
 		return;
 	}
@@ -413,8 +422,12 @@
 
 	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
 
-	devm_kfree(&pdev->dev, mp->clk_config);
-	devm_kfree(&pdev->dev, mp->vreg_config);
+	if (mp->clk_config)
+		devm_kfree(&pdev->dev, mp->clk_config);
+
+	if (mp->vreg_config)
+		devm_kfree(&pdev->dev, mp->vreg_config);
+
 	mp->num_vreg = 0;
 	mp->num_clk = 0;
 }
@@ -521,6 +534,10 @@
 
 	for (i = 0; i < mp->num_clk; i++) {
 		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+			if (mp->clk_config[i].max_rate &&
+					(rate > mp->clk_config[i].max_rate))
+				rate = mp->clk_config[i].max_rate;
+
 			mp->clk_config[i].rate = rate;
 			rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
 			break;
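
The hunk above caps any requested clock rate at the optional clock-max-rate value parsed earlier from DT (a max rate of 0 means no cap). A minimal sketch of that rule, with an illustrative helper name:

/* Cap a requested rate at max_rate; max_rate == 0 means "no cap". */
static unsigned long sde_power_clk_cap_rate(unsigned long rate,
					    unsigned long max_rate)
{
	if (max_rate && rate > max_rate)
		rate = max_rate;
	return rate;
}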
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 886ea83..c422f17 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -43,6 +43,9 @@
 	axictl = (axictl &
 		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
 		  TMC_AXICTL_PROT_CTL_B1;
+	axictl = (axictl &
+		  ~(TMC_AXICTL_CACHE_CTL_B0 | TMC_AXICTL_CACHE_CTL_B1)) |
+		  TMC_AXICTL_CACHE_CTL_B0 | TMC_AXICTL_CACHE_CTL_B1;
 	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
 
 	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 44b3ae3..8bd3a2a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -57,6 +57,8 @@
 /* TMC_AXICTL - 0x110 */
 #define TMC_AXICTL_PROT_CTL_B0	BIT(0)
 #define TMC_AXICTL_PROT_CTL_B1	BIT(1)
+#define TMC_AXICTL_CACHE_CTL_B0	BIT(2)
+#define TMC_AXICTL_CACHE_CTL_B1	BIT(3)
 #define TMC_AXICTL_SCT_GAT_MODE	BIT(7)
 #define TMC_AXICTL_WR_BURST_16	0xF00
 /* TMC_FFCR - 0x304 */
diff --git a/drivers/net/ethernet/msm/rndis_ipa.c b/drivers/net/ethernet/msm/rndis_ipa.c
index b218cb3..29596f6 100644
--- a/drivers/net/ethernet/msm/rndis_ipa.c
+++ b/drivers/net/ethernet/msm/rndis_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -124,29 +124,6 @@
 	(RNDIS_IPA_DEBUG("Driver state: %s\n",\
 	rndis_ipa_state_string((ctx)->state)))
 
-/**
- * struct rndis_loopback_pipe - hold all information needed for
- *  pipe loopback logic
- */
-struct rndis_loopback_pipe {
-	struct sps_pipe          *ipa_sps;
-	struct ipa_sps_params ipa_sps_connect;
-	struct ipa_connect_params ipa_connect_params;
-
-	struct sps_pipe          *dma_sps;
-	struct sps_connect        dma_connect;
-
-	struct sps_alloc_dma_chan dst_alloc;
-	struct sps_dma_chan       ipa_sps_channel;
-	enum sps_mode mode;
-	u32 ipa_peer_bam_hdl;
-	u32 peer_pipe_index;
-	u32 ipa_drv_ep_hdl;
-	u32 ipa_pipe_index;
-	enum ipa_client_type ipa_client;
-	ipa_notify_cb ipa_callback;
-	struct ipa_ep_cfg *ipa_ep_cfg;
-};
 
 /**
  * struct rndis_ipa_dev - main driver context parameters
@@ -161,13 +138,9 @@
  * @rx_dump_enable: dump all Rx packets
  * @icmp_filter: allow all ICMP packet to pass through the filters
  * @rm_enable: flag that enable/disable Resource manager request prior to Tx
- * @loopback_enable:  flag that enable/disable USB stub loopback
  * @deaggregation_enable: enable/disable IPA HW deaggregation logic
  * @during_xmit_error: flags that indicate that the driver is in a middle
  *  of error handling in Tx path
- * @usb_to_ipa_loopback_pipe: usb to ipa (Rx) pipe representation for loopback
- * @ipa_to_usb_loopback_pipe: ipa to usb (Tx) pipe representation for loopback
- * @bam_dma_hdl: handle representing bam-dma, used for loopback logic
  * @directory: holds all debug flags used by the driver to allow cleanup
  *  for driver unload
  * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
@@ -197,12 +170,8 @@
 	bool rx_dump_enable;
 	bool icmp_filter;
 	bool rm_enable;
-	bool loopback_enable;
 	bool deaggregation_enable;
 	bool during_xmit_error;
-	struct rndis_loopback_pipe usb_to_ipa_loopback_pipe;
-	struct rndis_loopback_pipe ipa_to_usb_loopback_pipe;
-	u32 bam_dma_hdl;
 	struct dentry *directory;
 	u32 eth_ipv4_hdr_hdl;
 	u32 eth_ipv6_hdr_hdl;
@@ -269,17 +238,6 @@
 static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx);
 static netdev_tx_t rndis_ipa_start_xmit
 	(struct sk_buff *skb, struct net_device *net);
-static int rndis_ipa_loopback_pipe_create
-	(struct rndis_ipa_dev *rndis_ipa_ctx,
-	struct rndis_loopback_pipe *loopback_pipe);
-static void rndis_ipa_destroy_loopback_pipe
-	(struct rndis_loopback_pipe *loopback_pipe);
-static int rndis_ipa_create_loopback(struct rndis_ipa_dev *rndis_ipa_ctx);
-static void rndis_ipa_destroy_loopback(struct rndis_ipa_dev *rndis_ipa_ctx);
-static int rndis_ipa_setup_loopback
-	(bool enable, struct rndis_ipa_dev *rndis_ipa_ctx);
-static int rndis_ipa_debugfs_loopback_open
-	(struct inode *inode, struct file *file);
 static int rndis_ipa_debugfs_atomic_open
 	(struct inode *inode, struct file *file);
 static int rndis_ipa_debugfs_aggr_open
@@ -287,18 +245,6 @@
 static ssize_t rndis_ipa_debugfs_aggr_write
 	(struct file *file,
 	const char __user *buf, size_t count, loff_t *ppos);
-static ssize_t rndis_ipa_debugfs_loopback_write
-	(struct file *file,
-	const char __user *buf, size_t count, loff_t *ppos);
-static ssize_t rndis_ipa_debugfs_enable_write
-	(struct file *file,
-	const char __user *buf, size_t count, loff_t *ppos);
-static ssize_t rndis_ipa_debugfs_enable_read
-	(struct file *file,
-	char __user *ubuf, size_t count, loff_t *ppos);
-static ssize_t rndis_ipa_debugfs_loopback_read
-	(struct file *file,
-	char __user *ubuf, size_t count, loff_t *ppos);
 static ssize_t rndis_ipa_debugfs_atomic_read
 	(struct file *file,
 	char __user *ubuf, size_t count, loff_t *ppos);
@@ -336,12 +282,6 @@
 	.read = rndis_ipa_debugfs_atomic_read,
 };
 
-const struct file_operations rndis_ipa_loopback_ops = {
-		.open = rndis_ipa_debugfs_loopback_open,
-		.read = rndis_ipa_debugfs_loopback_read,
-		.write = rndis_ipa_debugfs_loopback_write,
-};
-
 const struct file_operations rndis_ipa_aggr_ops = {
 		.open = rndis_ipa_debugfs_aggr_open,
 		.write = rndis_ipa_debugfs_aggr_write,
@@ -2254,15 +2194,6 @@
 		goto fail_file;
 	}
 
-	file = debugfs_create_file
-		("loopback_enable", flags_read_write,
-		rndis_ipa_ctx->directory,
-		rndis_ipa_ctx, &rndis_ipa_loopback_ops);
-	if (!file) {
-		RNDIS_IPA_ERROR("could not create outstanding file\n");
-		goto fail_file;
-	}
-
 	file = debugfs_create_u8
 		("state", flags_read_only,
 		rndis_ipa_ctx->directory, (u8 *)&rndis_ipa_ctx->state);
@@ -2434,59 +2365,6 @@
 	return count;
 }
 
-static int rndis_ipa_debugfs_loopback_open
-	(struct inode *inode, struct file *file)
-{
-	struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
-
-	file->private_data = rndis_ipa_ctx;
-
-	return 0;
-}
-
-static ssize_t rndis_ipa_debugfs_loopback_read
-	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
-{
-	int cnt;
-	struct rndis_ipa_dev *rndis_ipa_ctx = file->private_data;
-
-	file->private_data = &rndis_ipa_ctx->loopback_enable;
-
-	cnt = rndis_ipa_debugfs_enable_read
-		(file, ubuf, count, ppos);
-
-	return cnt;
-}
-
-static ssize_t rndis_ipa_debugfs_loopback_write
-	(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-	int retval;
-	int cnt;
-	struct rndis_ipa_dev *rndis_ipa_ctx = file->private_data;
-	bool old_state = rndis_ipa_ctx->loopback_enable;
-
-	file->private_data = &rndis_ipa_ctx->loopback_enable;
-
-	cnt = rndis_ipa_debugfs_enable_write(file, buf, count, ppos);
-
-	RNDIS_IPA_DEBUG("loopback_enable was set to:%d->%d\n",
-			old_state, rndis_ipa_ctx->loopback_enable);
-
-	if (old_state == rndis_ipa_ctx->loopback_enable) {
-		RNDIS_IPA_ERROR("NOP - same state\n");
-		return cnt;
-	}
-
-	retval = rndis_ipa_setup_loopback(
-				rndis_ipa_ctx->loopback_enable,
-				rndis_ipa_ctx);
-	if (retval)
-		rndis_ipa_ctx->loopback_enable = old_state;
-
-	return cnt;
-}
-
 static int rndis_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
 {
 	struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
@@ -2518,327 +2396,6 @@
 	return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
 }
 
-static ssize_t rndis_ipa_debugfs_enable_read
-	(struct file *file,
-	char __user *ubuf, size_t count, loff_t *ppos)
-{
-	int nbytes;
-	int size = 0;
-	int ret;
-	loff_t pos;
-	u8 enable_str[sizeof(char) * 3] = {0};
-	bool *enable = file->private_data;
-
-	pos = *ppos;
-	nbytes = scnprintf(enable_str, sizeof(enable_str), "%d\n", *enable);
-	ret = simple_read_from_buffer(ubuf, count, ppos, enable_str, nbytes);
-	if (ret < 0) {
-		RNDIS_IPA_ERROR("simple_read_from_buffer problem\n");
-		return ret;
-	}
-	size += ret;
-	count -= nbytes;
-	*ppos = pos + size;
-	return size;
-}
-
-static ssize_t rndis_ipa_debugfs_enable_write
-	(struct file *file,
-	const char __user *buf, size_t count, loff_t *ppos)
-{
-	unsigned long missing;
-	char input;
-	bool *enable = file->private_data;
-
-	if (count != sizeof(input) + 1) {
-		RNDIS_IPA_ERROR("wrong input length(%zd)\n", count);
-		return -EINVAL;
-	}
-	if (!buf) {
-		RNDIS_IPA_ERROR("Bad argument\n");
-		return -EINVAL;
-	}
-	missing = copy_from_user(&input, buf, 1);
-	if (missing)
-		return -EFAULT;
-	RNDIS_IPA_DEBUG("input received %c\n", input);
-	*enable = input - '0';
-	RNDIS_IPA_DEBUG("value was set to %d\n", *enable);
-	return count;
-}
-
-/**
- * Connects IPA->BAMDMA
- * This shall simulate the path from IPA to USB
- * Allowing the driver TX path
- */
-static int rndis_ipa_loopback_pipe_create(
-		struct rndis_ipa_dev *rndis_ipa_ctx,
-		struct rndis_loopback_pipe *loopback_pipe)
-{
-	int retval;
-
-	RNDIS_IPA_LOG_ENTRY();
-
-	/* SPS pipe has two side handshake
-	 * This is the first handshake of IPA->BAMDMA,
-	 * This is the IPA side
-	 */
-	loopback_pipe->ipa_connect_params.client = loopback_pipe->ipa_client;
-	loopback_pipe->ipa_connect_params.client_bam_hdl =
-			rndis_ipa_ctx->bam_dma_hdl;
-	loopback_pipe->ipa_connect_params.client_ep_idx =
-		loopback_pipe->peer_pipe_index;
-	loopback_pipe->ipa_connect_params.desc_fifo_sz = BAM_DMA_DESC_FIFO_SIZE;
-	loopback_pipe->ipa_connect_params.data_fifo_sz = BAM_DMA_DATA_FIFO_SIZE;
-	loopback_pipe->ipa_connect_params.notify = loopback_pipe->ipa_callback;
-	loopback_pipe->ipa_connect_params.priv = rndis_ipa_ctx;
-	loopback_pipe->ipa_connect_params.ipa_ep_cfg =
-		*loopback_pipe->ipa_ep_cfg;
-
-	/* loopback_pipe->ipa_sps_connect is out param */
-	retval = ipa_connect
-		(&loopback_pipe->ipa_connect_params,
-		&loopback_pipe->ipa_sps_connect,
-		&loopback_pipe->ipa_drv_ep_hdl);
-	if (retval) {
-		RNDIS_IPA_ERROR("ipa_connect() fail (%d)", retval);
-		return retval;
-	}
-	RNDIS_IPA_DEBUG("ipa_connect() succeeded, ipa_drv_ep_hdl=%d",
-			loopback_pipe->ipa_drv_ep_hdl);
-
-	/* SPS pipe has two side handshake
-	 * This is the second handshake of IPA->BAMDMA,
-	 * This is the BAMDMA side
-	 */
-	loopback_pipe->dma_sps = sps_alloc_endpoint();
-	if (!loopback_pipe->dma_sps) {
-		RNDIS_IPA_ERROR("sps_alloc_endpoint() failed ");
-		retval = -ENOMEM;
-		goto fail_sps_alloc;
-	}
-
-	retval = sps_get_config
-		(loopback_pipe->dma_sps,
-		&loopback_pipe->dma_connect);
-	if (retval) {
-		RNDIS_IPA_ERROR("sps_get_config() failed (%d)", retval);
-		goto fail_get_cfg;
-	}
-
-	/* Start setting the non IPA ep for SPS driver*/
-	loopback_pipe->dma_connect.mode = loopback_pipe->mode;
-
-	/* SPS_MODE_DEST: DMA end point is the dest (consumer) IPA->DMA */
-	if (loopback_pipe->mode == SPS_MODE_DEST) {
-		loopback_pipe->dma_connect.source =
-				loopback_pipe->ipa_sps_connect.ipa_bam_hdl;
-		loopback_pipe->dma_connect.src_pipe_index =
-				loopback_pipe->ipa_sps_connect.ipa_ep_idx;
-		loopback_pipe->dma_connect.destination =
-				rndis_ipa_ctx->bam_dma_hdl;
-		loopback_pipe->dma_connect.dest_pipe_index =
-				loopback_pipe->peer_pipe_index;
-
-	/* SPS_MODE_SRC: DMA end point is the source (producer) DMA->IPA */
-	} else {
-		loopback_pipe->dma_connect.source =
-				rndis_ipa_ctx->bam_dma_hdl;
-		loopback_pipe->dma_connect.src_pipe_index =
-				loopback_pipe->peer_pipe_index;
-		loopback_pipe->dma_connect.destination =
-				loopback_pipe->ipa_sps_connect.ipa_bam_hdl;
-		loopback_pipe->dma_connect.dest_pipe_index =
-				loopback_pipe->ipa_sps_connect.ipa_ep_idx;
-	}
-
-	loopback_pipe->dma_connect.desc = loopback_pipe->ipa_sps_connect.desc;
-	loopback_pipe->dma_connect.data = loopback_pipe->ipa_sps_connect.data;
-	loopback_pipe->dma_connect.event_thresh = 0x10;
-	/* BAM-to-BAM */
-	loopback_pipe->dma_connect.options = SPS_O_AUTO_ENABLE;
-
-	RNDIS_IPA_DEBUG("doing sps_connect() with - ");
-	RNDIS_IPA_DEBUG
-		("src bam_hdl:0x%lx, src_pipe#:%d",
-		loopback_pipe->dma_connect.source,
-		loopback_pipe->dma_connect.src_pipe_index);
-	RNDIS_IPA_DEBUG
-		("dst bam_hdl:0x%lx, dst_pipe#:%d",
-		loopback_pipe->dma_connect.destination,
-		loopback_pipe->dma_connect.dest_pipe_index);
-
-	retval = sps_connect
-		(loopback_pipe->dma_sps,
-		&loopback_pipe->dma_connect);
-	if (retval) {
-		RNDIS_IPA_ERROR
-			("sps_connect() fail for BAMDMA side (%d)",
-			retval);
-		goto fail_sps_connect;
-	}
-
-	RNDIS_IPA_LOG_EXIT();
-
-	return 0;
-
-fail_sps_connect:
-fail_get_cfg:
-	sps_free_endpoint(loopback_pipe->dma_sps);
-fail_sps_alloc:
-	ipa_disconnect(loopback_pipe->ipa_drv_ep_hdl);
-	return retval;
-}
-
-static void rndis_ipa_destroy_loopback_pipe(
-		struct rndis_loopback_pipe *loopback_pipe)
-{
-	sps_disconnect(loopback_pipe->dma_sps);
-	sps_free_endpoint(loopback_pipe->dma_sps);
-}
-
-/**
- * rndis_ipa_create_loopback() - create a BAM-DMA loopback
- *  in order to replace the USB core
- */
-static int rndis_ipa_create_loopback(struct rndis_ipa_dev *rndis_ipa_ctx)
-{
-	/* The BAM handle should be use as
-	 * source/destination in the sps_connect()
-	 */
-	int retval;
-
-	RNDIS_IPA_LOG_ENTRY();
-
-	retval = sps_ctrl_bam_dma_clk(true);
-	if (retval) {
-		RNDIS_IPA_ERROR("fail on enabling BAM-DMA clocks");
-		return -ENODEV;
-	}
-
-	/* Get BAM handle instead of USB handle */
-	rndis_ipa_ctx->bam_dma_hdl = sps_dma_get_bam_handle();
-	if (!rndis_ipa_ctx->bam_dma_hdl) {
-		RNDIS_IPA_ERROR("sps_dma_get_bam_handle() failed");
-		return -ENODEV;
-	}
-	RNDIS_IPA_DEBUG("sps_dma_get_bam_handle() succeeded (0x%x)",
-			rndis_ipa_ctx->bam_dma_hdl);
-
-	/* IPA<-BAMDMA, NetDev Rx path (BAMDMA is the USB stub) */
-	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.ipa_client =
-	IPA_CLIENT_USB_PROD;
-	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.peer_pipe_index =
-		FROM_USB_TO_IPA_BAMDMA;
-	/*DMA EP mode*/
-	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.mode = SPS_MODE_SRC;
-	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.ipa_ep_cfg =
-		&usb_to_ipa_ep_cfg_deaggr_en;
-	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.ipa_callback =
-			rndis_ipa_packet_receive_notify;
-	RNDIS_IPA_DEBUG("setting up IPA<-BAMDAM pipe (RNDIS_IPA RX path)");
-	retval = rndis_ipa_loopback_pipe_create
-		(rndis_ipa_ctx,
-		&rndis_ipa_ctx->usb_to_ipa_loopback_pipe);
-	if (retval) {
-		RNDIS_IPA_ERROR
-		("fail to close IPA->BAMDAM pipe");
-		goto fail_to_usb;
-	}
-	RNDIS_IPA_DEBUG("IPA->BAMDAM pipe successfully connected (TX path)");
-
-	/* IPA->BAMDMA, NetDev Tx path (BAMDMA is the USB stub)*/
-	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.ipa_client =
-		IPA_CLIENT_USB_CONS;
-	/*DMA EP mode*/
-	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.mode = SPS_MODE_DEST;
-	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.ipa_ep_cfg = &ipa_to_usb_ep_cfg;
-	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.peer_pipe_index =
-		FROM_IPA_TO_USB_BAMDMA;
-	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.ipa_callback =
-			rndis_ipa_tx_complete_notify;
-	RNDIS_IPA_DEBUG("setting up IPA->BAMDAM pipe (RNDIS_IPA TX path)");
-	retval = rndis_ipa_loopback_pipe_create
-		(rndis_ipa_ctx,
-		&rndis_ipa_ctx->ipa_to_usb_loopback_pipe);
-	if (retval) {
-		RNDIS_IPA_ERROR("fail to close IPA<-BAMDAM pipe");
-		goto fail_from_usb;
-	}
-	RNDIS_IPA_DEBUG("IPA<-BAMDAM pipe successfully connected(RX path)");
-
-	RNDIS_IPA_LOG_EXIT();
-
-	return 0;
-
-fail_from_usb:
-	rndis_ipa_destroy_loopback_pipe(
-			&rndis_ipa_ctx->usb_to_ipa_loopback_pipe);
-fail_to_usb:
-
-	return retval;
-}
-
-static void rndis_ipa_destroy_loopback(struct rndis_ipa_dev *rndis_ipa_ctx)
-{
-	rndis_ipa_destroy_loopback_pipe(
-			&rndis_ipa_ctx->ipa_to_usb_loopback_pipe);
-	rndis_ipa_destroy_loopback_pipe(
-			&rndis_ipa_ctx->usb_to_ipa_loopback_pipe);
-	sps_dma_free_bam_handle(rndis_ipa_ctx->bam_dma_hdl);
-	if (sps_ctrl_bam_dma_clk(false))
-		RNDIS_IPA_ERROR("fail to disable BAM-DMA clocks");
-}
-
-/**
- * rndis_ipa_setup_loopback() - create/destroy a loopback on IPA HW
- *  (as USB pipes loopback) and notify RNDIS_IPA netdev for pipe connected
- * @enable: flag that determines if the loopback should be created or destroyed
- * @rndis_ipa_ctx: driver main context
- *
- * This function is the main loopback logic.
- * It shall create/destroy the loopback by using BAM-DMA and notify
- * the netdev accordingly.
- */
-static int rndis_ipa_setup_loopback
-	(bool enable, struct rndis_ipa_dev *rndis_ipa_ctx)
-{
-	int retval;
-
-	if (!enable) {
-		rndis_ipa_destroy_loopback(rndis_ipa_ctx);
-		RNDIS_IPA_DEBUG("loopback destroy done");
-		retval = rndis_ipa_pipe_disconnect_notify(rndis_ipa_ctx);
-		if (retval) {
-			RNDIS_IPA_ERROR("connect notify fail");
-			return -ENODEV;
-		}
-		return 0;
-	}
-
-	RNDIS_IPA_DEBUG("creating loopback (instead of USB core)");
-	retval = rndis_ipa_create_loopback(rndis_ipa_ctx);
-	RNDIS_IPA_DEBUG("creating loopback- %s", (retval ? "FAIL" : "OK"));
-	if (retval) {
-		RNDIS_IPA_ERROR("Fail to connect loopback");
-		return -ENODEV;
-	}
-	retval = rndis_ipa_pipe_connect_notify(
-			rndis_ipa_ctx->usb_to_ipa_loopback_pipe.ipa_drv_ep_hdl,
-			rndis_ipa_ctx->ipa_to_usb_loopback_pipe.ipa_drv_ep_hdl,
-			BAM_DMA_DATA_FIFO_SIZE,
-			15,
-			BAM_DMA_DATA_FIFO_SIZE - rndis_ipa_ctx->net->mtu,
-			rndis_ipa_ctx);
-	if (retval) {
-		RNDIS_IPA_ERROR("connect notify fail");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
 static int rndis_ipa_init_module(void)
 {
 	pr_info("RNDIS_IPA module is loaded.");
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index e4bf07d..4aba42f 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -24,6 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 
 const struct of_device_id of_default_bus_match_table[] = {
@@ -190,6 +191,7 @@
 	dev->dev.platform_data = platform_data;
 	of_dma_configure(&dev->dev, dev->dev.of_node);
 	of_msi_configure(&dev->dev, dev->dev.of_node);
+	of_reserved_mem_device_init_by_idx(&dev->dev, dev->dev.of_node, 0);
 
 	if (of_device_add(dev) != 0) {
 		of_dma_deconfigure(&dev->dev);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5a288f1..f049220 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -80,6 +80,25 @@
 	  The driver provides an interface to items in a heap shared among all
 	  processors in a Qualcomm platform.
 
+config MSM_SERVICE_LOCATOR
+	bool "Service Locator"
+	depends on MSM_QMI_INTERFACE
+	help
+	  The Service Locator provides a library to retrieve location
+	  information for a given service identifier. Here, location means
+	  which process domain exports the service and which subsystem
+	  that process domain executes in.
+
+config MSM_SERVICE_NOTIFIER
+	bool "Service Notifier"
+	depends on MSM_SERVICE_LOCATOR && MSM_SUBSYSTEM_RESTART
+	help
+	  The Service Notifier provides a library for a kernel client to
+	  register for state change notifications regarding a remote service.
+	  A remote service here refers to a process providing certain services
+	  like audio, the identifier for which is provided by the service
+	  locator.
+
 config QCOM_SMD
 	tristate "Qualcomm Shared Memory Driver (SMD)"
 	depends on QCOM_SMEM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index a2c2294..3a7be66 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -32,6 +32,8 @@
 obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
 obj-$(CONFIG_QTI_RPMH_API) += rpmh.o
 obj-$(CONFIG_QTI_SYSTEM_PM) += system_pm.o
+obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o
+obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
 obj-$(CONFIG_MSM_SMP2P) += msm_smp2p.o smp2p_debug.o smp2p_sleepstate.o
 obj-$(CONFIG_MSM_SMP2P_TEST) += smp2p_loopback.o smp2p_test.o smp2p_spinlock_test.o
 obj-$(CONFIG_MSM_IPC_ROUTER_SMD_XPRT) += ipc_router_smd_xprt.o
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 1e593e0..0dd8b26 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -627,6 +627,7 @@
 	char fw_name[30];
 	int num = seg->num;
 	const struct firmware *fw = NULL;
+	void __iomem *firmware_buf;
 	struct pil_map_fw_info map_fw_info = {
 		.attrs = desc->attrs,
 		.region = desc->priv->region,
@@ -638,20 +639,28 @@
 	if (seg->filesz) {
 		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
 				desc->fw_name, num);
+		firmware_buf = desc->map_fw_mem(seg->paddr, seg->filesz,
+						map_data);
+		if (!firmware_buf) {
+			pil_err(desc, "Failed to map memory for firmware buffer\n");
+			return -ENOMEM;
+		}
+
 		ret = request_firmware_into_buf(&fw, fw_name, desc->dev,
-						map_data, seg->filesz);
-		if (ret < 0) {
+						firmware_buf, seg->filesz);
+		desc->unmap_fw_mem(firmware_buf, seg->filesz, map_data);
+
+		if (ret) {
 			pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
 				fw_name, ret);
 			return ret;
 		}
 
-		if (ret != seg->filesz) {
+		if (fw->size != seg->filesz) {
 			pil_err(desc, "Blob size %u doesn't match %lu\n",
 					ret, seg->filesz);
 			return -EPERM;
 		}
-		ret = 0;
 	}
 
 	/* Zero out trailing memory */
diff --git a/drivers/soc/qcom/service-locator-private.h b/drivers/soc/qcom/service-locator-private.h
new file mode 100644
index 0000000..df42080
--- /dev/null
+++ b/drivers/soc/qcom/service-locator-private.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SERVICE_LOCATOR_V01_H
+#define SERVICE_LOCATOR_V01_H
+
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/service-locator.h>
+
+#define SERVREG_LOC_SERVICE_ID_V01 0x40
+#define SERVREG_LOC_SERVICE_VERS_V01 0x01
+
+#define QMI_SERVREG_LOC_INDICATION_REGISTER_RESP_V01 0x0020
+#define QMI_SERVREG_LOC_REGISTER_SERVICE_LIST_REQ_V01 0x0022
+#define QMI_SERVREG_LOC_GET_DOMAIN_LIST_REQ_V01 0x0021
+#define QMI_SERVREG_LOC_GET_DOMAIN_LIST_RESP_V01 0x0021
+#define QMI_SERVREG_LOC_DATABASE_UPDATED_IND_V01 0x0023
+#define QMI_SERVREG_LOC_INDICATION_REGISTER_REQ_V01 0x0020
+#define QMI_SERVREG_LOC_REGISTER_SERVICE_LIST_RESP_V01 0x0022
+
+#define QMI_SERVREG_LOC_NAME_LENGTH_V01 64
+#define QMI_SERVREG_LOC_LIST_LENGTH_V01 32
+
+enum qmi_servreg_loc_service_instance_enum_type_v01 {
+	QMI_SERVREG_LOC_SERVICE_INSTANCE_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+	QMI_SERVREG_LOC_SERVICE_INSTANCE_APSS_V01 = 0x1,
+	QMI_SERVREG_LOC_SERVICE_INSTANCE_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_servreg_loc_indication_register_req_msg_v01 {
+	uint8_t enable_database_updated_indication_valid;
+	uint8_t enable_database_updated_indication;
+};
+#define QMI_SERVREG_LOC_INDICATION_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 4
+struct elem_info qmi_servreg_loc_indication_register_req_msg_v01_ei[];
+
+struct qmi_servreg_loc_indication_register_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_LOC_INDICATION_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 7
+struct elem_info qmi_servreg_loc_indication_register_resp_msg_v01_ei[];
+
+struct qmi_servreg_loc_get_domain_list_req_msg_v01 {
+	char service_name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	uint8_t domain_offset_valid;
+	uint32_t domain_offset;
+};
+#define QMI_SERVREG_LOC_GET_DOMAIN_LIST_REQ_MSG_V01_MAX_MSG_LEN 74
+struct elem_info qmi_servreg_loc_get_domain_list_req_msg_v01_ei[];
+
+struct qmi_servreg_loc_get_domain_list_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t total_domains_valid;
+	uint16_t total_domains;
+	uint8_t db_rev_count_valid;
+	uint16_t db_rev_count;
+	uint8_t domain_list_valid;
+	uint32_t domain_list_len;
+	struct servreg_loc_entry_v01
+				domain_list[QMI_SERVREG_LOC_LIST_LENGTH_V01];
+};
+#define QMI_SERVREG_LOC_GET_DOMAIN_LIST_RESP_MSG_V01_MAX_MSG_LEN 2389
+struct elem_info qmi_servreg_loc_get_domain_list_resp_msg_v01_ei[];
+
+struct qmi_servreg_loc_register_service_list_req_msg_v01 {
+	char domain_name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	uint32_t service_list_len;
+	struct servreg_loc_entry_v01
+				service_list[QMI_SERVREG_LOC_LIST_LENGTH_V01];
+};
+#define QMI_SERVREG_LOC_REGISTER_SERVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN 2439
+struct elem_info qmi_servreg_loc_register_service_list_req_msg_v01_ei[];
+
+struct qmi_servreg_loc_register_service_list_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_LOC_REGISTER_SERVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN 7
+struct elem_info qmi_servreg_loc_register_service_list_resp_msg_v01_ei[];
+
+struct qmi_servreg_loc_database_updated_ind_msg_v01 {
+	char placeholder;
+};
+#define QMI_SERVREG_LOC_DATABASE_UPDATED_IND_MSG_V01_MAX_MSG_LEN 0
+struct elem_info qmi_servreg_loc_database_updated_ind_msg_v01_ei[];
+
+#define QMI_EOTI_DATA_TYPE	\
+{				\
+	.data_type = QMI_EOTI,	\
+	.elem_len  = 0,		\
+	.elem_size = 0,		\
+	.is_array  = NO_ARRAY,	\
+	.tlv_type  = 0x00,	\
+	.offset    = 0,		\
+	.ei_array  = NULL,	\
+},
+
+static struct elem_info servreg_loc_entry_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_loc_entry_v01,
+					   name),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_loc_entry_v01,
+					   instance_id),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_loc_entry_v01,
+					   service_data_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_loc_entry_v01,
+					   service_data),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_indication_register_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_indication_register_req_msg_v01,
+				enable_database_updated_indication_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_indication_register_req_msg_v01,
+				enable_database_updated_indication),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_indication_register_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_indication_register_resp_msg_v01,
+			resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_get_domain_list_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_req_msg_v01,
+				service_name),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_req_msg_v01,
+				domain_offset_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_req_msg_v01,
+				domain_offset),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_get_domain_list_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				total_domains_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				total_domains),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				db_rev_count_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				db_rev_count),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				domain_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				domain_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_SERVREG_LOC_LIST_LENGTH_V01,
+		.elem_size      = sizeof(struct servreg_loc_entry_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct
+				qmi_servreg_loc_get_domain_list_resp_msg_v01,
+				domain_list),
+		.ei_array      = servreg_loc_entry_v01_ei,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_register_service_list_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_register_service_list_req_msg_v01,
+			domain_name),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_register_service_list_req_msg_v01,
+			service_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = QMI_SERVREG_LOC_LIST_LENGTH_V01,
+		.elem_size      = sizeof(struct servreg_loc_entry_v01),
+		.is_array       = VAR_LEN_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_register_service_list_req_msg_v01,
+			service_list),
+		.ei_array      = servreg_loc_entry_v01_ei,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_register_service_list_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_loc_register_service_list_resp_msg_v01,
+			resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+struct elem_info qmi_servreg_loc_database_updated_ind_msg_v01_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+#endif
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
new file mode 100644
index 0000000..2f578c5
--- /dev/null
+++ b/drivers/soc/qcom/service-locator.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "servloc: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/service-locator.h>
+#include "service-locator-private.h"
+
+#define SERVREG_LOC_SERVICE_INSTANCE_ID			1
+
+#define QMI_RESP_BIT_SHIFT(x)				((x) << 16)
+#define QMI_SERVREG_LOC_SERVER_INITIAL_TIMEOUT		2000
+#define QMI_SERVREG_LOC_SERVER_TIMEOUT			2000
+#define INITIAL_TIMEOUT					100000
+
+#define LOCATOR_NOT_PRESENT	0
+#define LOCATOR_PRESENT		1
+
+static u32 locator_status = LOCATOR_NOT_PRESENT;
+static bool service_inited;
+
+module_param_named(enable, locator_status, uint, 0644);
+
+static void service_locator_svc_arrive(struct work_struct *work);
+static void service_locator_svc_exit(struct work_struct *work);
+static void service_locator_recv_msg(struct work_struct *work);
+static void pd_locator_work(struct work_struct *work);
+
+struct workqueue_struct *servloc_wq;
+
+struct pd_qmi_data {
+	struct work_struct svc_arrive;
+	struct work_struct svc_exit;
+	struct work_struct svc_rcv_msg;
+	struct notifier_block notifier;
+	struct completion service_available;
+	struct mutex service_mutex;
+	struct qmi_handle *clnt_handle;
+};
+
+struct pd_qmi_work {
+	struct work_struct pd_loc_work;
+	struct pd_qmi_client_data *pdc;
+	struct notifier_block *notifier;
+};
+DEFINE_MUTEX(service_init_mutex);
+struct pd_qmi_data service_locator;
+
+/* Refer to soc/qcom/service-locator.h for usage of the APIs defined here */
+
+static int service_locator_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		queue_work(servloc_wq, &service_locator.svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		queue_work(servloc_wq, &service_locator.svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void service_locator_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&service_locator.svc_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
+static void service_locator_svc_arrive(struct work_struct *work)
+{
+	int rc = 0;
+
+	/* Create a Local client port for QMI communication */
+	mutex_lock(&service_locator.service_mutex);
+	service_locator.clnt_handle =
+			qmi_handle_create(service_locator_clnt_notify, NULL);
+	if (!service_locator.clnt_handle) {
+		service_locator.clnt_handle = NULL;
+		mutex_unlock(&service_locator.service_mutex);
+		pr_err("Service locator QMI client handle alloc failed!\n");
+		return;
+	}
+
+	/* Connect to service */
+	rc = qmi_connect_to_service(service_locator.clnt_handle,
+		SERVREG_LOC_SERVICE_ID_V01, SERVREG_LOC_SERVICE_VERS_V01,
+		SERVREG_LOC_SERVICE_INSTANCE_ID);
+	if (rc) {
+		qmi_handle_destroy(service_locator.clnt_handle);
+		service_locator.clnt_handle = NULL;
+		mutex_unlock(&service_locator.service_mutex);
+		pr_err("Unable to connnect to service rc:%d\n", rc);
+		return;
+	}
+	if (!service_inited)
+		complete_all(&service_locator.service_available);
+	mutex_unlock(&service_locator.service_mutex);
+	pr_info("Connection established with the Service locator\n");
+}
+
+static void service_locator_svc_exit(struct work_struct *work)
+{
+	mutex_lock(&service_locator.service_mutex);
+	qmi_handle_destroy(service_locator.clnt_handle);
+	service_locator.clnt_handle = NULL;
+	mutex_unlock(&service_locator.service_mutex);
+	pr_info("Connection with service locator lost\n");
+}
+
+static void service_locator_recv_msg(struct work_struct *work)
+{
+	int ret;
+
+	do {
+		pr_debug("Notified about a Receive event\n");
+		ret = qmi_recv_msg(service_locator.clnt_handle);
+		if (ret < 0)
+			pr_err("Error receiving message rc:%d. Retrying...\n",
+								ret);
+	} while (ret == 0);
+
+}
+
+static void store_get_domain_list_response(struct pd_qmi_client_data *pd,
+		struct qmi_servreg_loc_get_domain_list_resp_msg_v01 *resp,
+		int offset)
+{
+	int i;
+
+	for (i = offset; i < resp->domain_list_len; i++) {
+		pd->domain_list[i].instance_id =
+					resp->domain_list[i].instance_id;
+		strlcpy(pd->domain_list[i].name, resp->domain_list[i].name,
+			QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1);
+		pd->domain_list[i].service_data_valid =
+					resp->domain_list[i].service_data_valid;
+		pd->domain_list[i].service_data =
+					resp->domain_list[i].service_data;
+	}
+}
+
+static int servreg_loc_send_msg(struct msg_desc *req_desc,
+		struct msg_desc *resp_desc,
+		struct qmi_servreg_loc_get_domain_list_req_msg_v01 *req,
+		struct qmi_servreg_loc_get_domain_list_resp_msg_v01 *resp,
+		struct pd_qmi_client_data *pd)
+{
+	int rc;
+
+	/*
+	 * Send msg and get response. There is a chance that the service went
+	 * away since the time we last checked for it to be available and
+	 * actually made this call. In that case the call just fails.
+	 */
+	rc = qmi_send_req_wait(service_locator.clnt_handle, req_desc, req,
+		sizeof(*req), resp_desc, resp, sizeof(*resp),
+		msecs_to_jiffies(QMI_SERVREG_LOC_SERVER_TIMEOUT));
+	if (rc < 0) {
+		pr_err("QMI send req failed for client %s, ret - %d\n",
+			pd->client_name, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp->resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request for client %s failed 0x%x\n",
+			pd->client_name, QMI_RESP_BIT_SHIFT(resp->resp.error));
+		return -EREMOTEIO;
+	}
+	return rc;
+}
+
+static int service_locator_send_msg(struct pd_qmi_client_data *pd)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_servreg_loc_get_domain_list_resp_msg_v01 *resp = NULL;
+	struct qmi_servreg_loc_get_domain_list_req_msg_v01 *req = NULL;
+	int rc;
+	int db_rev_count = 0, domains_read = 0;
+
+	if (!service_locator.clnt_handle) {
+		pr_err("Service locator not available!\n");
+		return -EAGAIN;
+	}
+
+	req = kmalloc(sizeof(
+		struct qmi_servreg_loc_get_domain_list_req_msg_v01),
+		GFP_KERNEL);
+	if (!req) {
+		pr_err("Unable to allocate memory for req message\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+	resp = kmalloc(sizeof(
+		struct qmi_servreg_loc_get_domain_list_resp_msg_v01),
+		GFP_KERNEL);
+	if (!resp) {
+		pr_err("Unable to allocate memory for resp message\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Prepare req and response message formats */
+	req_desc.msg_id = QMI_SERVREG_LOC_GET_DOMAIN_LIST_REQ_V01;
+	req_desc.max_msg_len =
+		QMI_SERVREG_LOC_GET_DOMAIN_LIST_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.ei_array = qmi_servreg_loc_get_domain_list_req_msg_v01_ei;
+
+	resp_desc.msg_id = QMI_SERVREG_LOC_GET_DOMAIN_LIST_RESP_V01;
+	resp_desc.max_msg_len =
+		QMI_SERVREG_LOC_GET_DOMAIN_LIST_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.ei_array = qmi_servreg_loc_get_domain_list_resp_msg_v01_ei;
+
+	/* Prepare req and response message */
+	strlcpy(req->service_name, pd->service_name,
+		QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1);
+	req->domain_offset_valid = true;
+	req->domain_offset = 0;
+
+	pd->domain_list = NULL;
+	do {
+		req->domain_offset += domains_read;
+		rc = servreg_loc_send_msg(&req_desc, &resp_desc, req, resp,
+					pd);
+		if (rc < 0) {
+			pr_err("send msg failed rc:%d\n", rc);
+			goto out;
+		}
+		if (!domains_read) {
+			db_rev_count = pd->db_rev_count = resp->db_rev_count;
+			pd->total_domains = resp->total_domains;
+			if (!pd->total_domains && resp->domain_list_len) {
+				pr_err("total domains not set\n");
+				pd->total_domains = resp->domain_list_len;
+			}
+			pd->domain_list = kmalloc(
+					sizeof(struct servreg_loc_entry_v01) *
+					resp->total_domains, GFP_KERNEL);
+			if (!pd->domain_list) {
+				pr_err("Cannot allocate domain list\n");
+				rc = -ENOMEM;
+				goto out;
+			}
+		}
+		if (db_rev_count != resp->db_rev_count) {
+			pr_err("Service Locator DB updated for client %s\n",
+				pd->client_name);
+			kfree(pd->domain_list);
+			rc = -EAGAIN;
+			goto out;
+		}
+		/* Copy the response*/
+		store_get_domain_list_response(pd, resp, domains_read);
+		domains_read += resp->domain_list_len;
+	} while (domains_read < resp->total_domains);
+	rc = 0;
+out:
+	kfree(req);
+	kfree(resp);
+	return rc;
+}
+
+static int init_service_locator(void)
+{
+	int rc = 0;
+
+	mutex_lock(&service_init_mutex);
+	if (locator_status == LOCATOR_NOT_PRESENT) {
+		pr_err("Service Locator not enabled\n");
+		rc = -ENODEV;
+		goto inited;
+	}
+	if (service_inited)
+		goto inited;
+
+	service_locator.notifier.notifier_call =
+					service_locator_svc_event_notify;
+	init_completion(&service_locator.service_available);
+	mutex_init(&service_locator.service_mutex);
+
+	servloc_wq = create_singlethread_workqueue("servloc_wq");
+	if (!servloc_wq) {
+		rc = -ENOMEM;
+		pr_err("Could not create workqueue\n");
+		goto inited;
+	}
+
+	INIT_WORK(&service_locator.svc_arrive, service_locator_svc_arrive);
+	INIT_WORK(&service_locator.svc_exit, service_locator_svc_exit);
+	INIT_WORK(&service_locator.svc_rcv_msg, service_locator_recv_msg);
+
+	rc = qmi_svc_event_notifier_register(SERVREG_LOC_SERVICE_ID_V01,
+		SERVREG_LOC_SERVICE_VERS_V01, SERVREG_LOC_SERVICE_INSTANCE_ID,
+		&service_locator.notifier);
+	if (rc < 0) {
+		pr_err("Notifier register failed rc:%d\n", rc);
+		goto inited;
+	}
+
+	wait_for_completion(&service_locator.service_available);
+	service_inited = true;
+	mutex_unlock(&service_init_mutex);
+	pr_info("Service locator initialized\n");
+	return 0;
+
+inited:
+	mutex_unlock(&service_init_mutex);
+	return rc;
+}
+
+int get_service_location(char *client_name, char *service_name,
+				struct notifier_block *locator_nb)
+{
+	struct pd_qmi_client_data *pqcd;
+	struct pd_qmi_work *pqw;
+	int rc = 0;
+
+	if (!locator_nb || !client_name || !service_name) {
+		rc = -EINVAL;
+		pr_err("Invalid input!\n");
+		goto err;
+	}
+
+	pqcd = kmalloc(sizeof(struct pd_qmi_client_data), GFP_KERNEL);
+	if (!pqcd) {
+		rc = -ENOMEM;
+		pr_err("Allocation failed\n");
+		goto err;
+	}
+	strlcpy(pqcd->client_name, client_name, ARRAY_SIZE(pqcd->client_name));
+	strlcpy(pqcd->service_name, service_name,
+		ARRAY_SIZE(pqcd->service_name));
+
+	pqw = kmalloc(sizeof(struct pd_qmi_work), GFP_KERNEL);
+	if (!pqw) {
+		rc = -ENOMEM;
+		pr_err("Allocation failed\n");
+		kfree(pqcd);
+		goto err;
+	}
+	pqw->notifier = locator_nb;
+	pqw->pdc = pqcd;
+
+	INIT_WORK(&pqw->pd_loc_work, pd_locator_work);
+	schedule_work(&pqw->pd_loc_work);
+
+err:
+	return rc;
+}
+EXPORT_SYMBOL(get_service_location);
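+
+/*
+ * Usage sketch (illustrative, names are placeholders): a client registers a
+ * notifier callback and asks where a service runs. The callback is invoked
+ * asynchronously with LOCATOR_UP and the filled pd_qmi_client_data, or with
+ * LOCATOR_DOWN on failure:
+ *
+ *	static int my_locator_cb(struct notifier_block *nb,
+ *				 unsigned long opcode, void *data)
+ *	{
+ *		struct pd_qmi_client_data *pd = data;
+ *
+ *		if (opcode == LOCATOR_UP)
+ *			pr_info("%s: %d domains found\n",
+ *				pd->service_name, pd->total_domains);
+ *		return 0;
+ *	}
+ *
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_locator_cb,
+ *	};
+ *
+ *	get_service_location("my_client", "avs/audio", &my_nb);
+ *
+ * "avs/audio" is a placeholder service name; LOCATOR_UP and LOCATOR_DOWN
+ * come from soc/qcom/service-locator.h.
+ */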
+
+static void pd_locator_work(struct work_struct *work)
+{
+	int rc = 0;
+	struct pd_qmi_client_data *data;
+	struct pd_qmi_work *pdqw = container_of(work, struct pd_qmi_work,
+								pd_loc_work);
+
+	data = pdqw->pdc;
+	rc = init_service_locator();
+	if (rc) {
+		pr_err("Unable to connect to service locator!, rc = %d\n", rc);
+		pdqw->notifier->notifier_call(pdqw->notifier,
+			LOCATOR_DOWN, NULL);
+		goto err;
+	}
+	rc = service_locator_send_msg(data);
+	if (rc) {
+		pr_err("Failed to get process domains for %s for client %s rc:%d\n",
+			data->service_name, data->client_name, rc);
+		pdqw->notifier->notifier_call(pdqw->notifier,
+			LOCATOR_DOWN, NULL);
+		goto err;
+	}
+	pdqw->notifier->notifier_call(pdqw->notifier, LOCATOR_UP, data);
+
+err:
+	kfree(data);
+	kfree(pdqw);
+}
+
+int find_subsys(const char *pd_path, char *subsys)
+{
+	char *start, *end;
+
+	if (!subsys || !pd_path)
+		return -EINVAL;
+
+	start = strnstr(pd_path, "/", QMI_SERVREG_LOC_NAME_LENGTH_V01);
+	if (!start)
+		return -EINVAL;
+	start++;
+	end = strnstr(start, "/", QMI_SERVREG_LOC_NAME_LENGTH_V01);
+	if (!end || start == end)
+		return -EINVAL;
+
+	strlcpy(subsys, start, end - start + 1);
+	return 0;
+}
+EXPORT_SYMBOL(find_subsys);
+
+static struct pd_qmi_client_data test_data;
+
+static int servloc_test_pdr_cb(struct notifier_block *this,
+			unsigned long opcode, void *ptr)
+{
+	int i, rc = 0;
+	char subsys[QMI_SERVREG_LOC_NAME_LENGTH_V01];
+	struct pd_qmi_client_data *return_data;
+
+	return_data = (struct pd_qmi_client_data *)ptr;
+
+	if (opcode == LOCATOR_DOWN) {
+		pr_err("%s: Failed to get process domains, opcode = %lu\n",
+			__func__, opcode);
+		return -EIO;
+	}
+
+		pr_err("Service Name: %s\tTotal Domains: %d\n",
+			return_data->service_name, return_data->total_domains);
+
+		for (i = 0; i < return_data->total_domains; i++) {
+			pr_err("Instance ID: %d\t ",
+				return_data->domain_list[i].instance_id);
+			pr_err("Domain Name: %s\n",
+				return_data->domain_list[i].name);
+			rc = find_subsys(return_data->domain_list[i].name,
+					subsys);
+		if (rc < 0)
+			pr_err("No valid subsys found for %s!\n",
+				return_data->domain_list[i].name);
+		else
+			pr_err("Subsys: %s\n", subsys);
+		}
+	return 0;
+}
+
+static struct notifier_block pdr_service_nb = {
+		.notifier_call  = servloc_test_pdr_cb,
+};
+
+static ssize_t servloc_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	int rc = 0;
+	char *node_name = filp->private_data;
+
+	if (!strcmp(node_name, "test_servloc_get"))
+		rc = get_service_location(test_data.client_name,
+				test_data.service_name, &pdr_service_nb);
+
+	return rc;
+}
+
+static ssize_t servloc_write(struct file *fp, const char __user *buf,
+				size_t count, loff_t *unused)
+{
+	char *node_name = fp->private_data;
+
+	if (!buf)
+		return -EIO;
+	if (!strcmp(node_name, "service_name")) {
+		snprintf(test_data.service_name, sizeof(test_data.service_name),
+			"%.*s", (int) min((size_t)count - 1,
+			(sizeof(test_data.service_name) - 1)), buf);
+	} else {
+		snprintf(test_data.client_name, sizeof(test_data.client_name),
+			"%.*s", (int) min((size_t)count - 1,
+			(sizeof(test_data.client_name) - 1)), buf);
+	}
+	return count;
+}
+
+static const struct file_operations servloc_fops = {
+	.open	= simple_open,
+	.read	= servloc_read,
+	.write	= servloc_write,
+};
+
+static struct dentry *servloc_base_dir;
+static struct dentry *test_servloc_file;
+
+static int __init servloc_debugfs_init(void)
+{
+	servloc_base_dir = debugfs_create_dir("test_servloc", NULL);
+	return !servloc_base_dir ? -ENOMEM : 0;
+}
+
+static void servloc_debugfs_exit(void)
+{
+	debugfs_remove_recursive(servloc_base_dir);
+}
+
+static int servloc_debugfs_add(void)
+{
+	int rc;
+
+	if (!servloc_base_dir)
+		return -ENOMEM;
+
+	test_servloc_file = debugfs_create_file("client_name",
+				0644, servloc_base_dir,
+				"client_name", &servloc_fops);
+	rc = !test_servloc_file ? -ENOMEM : 0;
+
+	if (rc == 0) {
+		test_servloc_file = debugfs_create_file("service_name",
+				0644, servloc_base_dir,
+				"service_name", &servloc_fops);
+		rc = !test_servloc_file ? -ENOMEM : 0;
+	}
+
+	if (rc == 0) {
+		test_servloc_file = debugfs_create_file("test_servloc_get",
+				0644, servloc_base_dir,
+				"test_servloc_get", &servloc_fops);
+		rc = !test_servloc_file ? -ENOMEM : 0;
+	}
+	return rc;
+}
+
+static int __init service_locator_init(void)
+{
+	pr_debug("service_locator_status = %d\n", locator_status);
+	if (servloc_debugfs_init())
+		pr_err("Could not create test_servloc base directory!");
+	if (servloc_debugfs_add())
+		pr_err("Could not create test_servloc node entries!");
+	return 0;
+}
+
+static void __exit service_locator_exit(void)
+{
+	servloc_debugfs_exit();
+}
+module_init(service_locator_init);
+module_exit(service_locator_exit);
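For reviewers, a minimal sketch of how a kernel client is expected to consume the locator API added above. This is not part of the patch: the client and service names ("example_client", "avs/audio") are placeholders, and the callback only logs what the locator returns.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <soc/qcom/service-locator.h>

static int example_locator_cb(struct notifier_block *nb,
			      unsigned long opcode, void *data)
{
	struct pd_qmi_client_data *pd = data;
	char subsys[QMI_SERVREG_LOC_NAME_LENGTH_V01];
	int i;

	if (opcode == LOCATOR_DOWN) {
		pr_err("example: process domain lookup failed\n");
		return NOTIFY_DONE;
	}

	/*
	 * LOCATOR_UP: the pd_qmi_client_data is freed once this callback
	 * returns, so anything needed later must be copied out here.
	 */
	for (i = 0; i < pd->total_domains; i++) {
		if (!find_subsys(pd->domain_list[i].name, subsys))
			pr_info("example: %s runs on %s (instance %u)\n",
				pd->domain_list[i].name, subsys,
				pd->domain_list[i].instance_id);
	}
	return NOTIFY_OK;
}

static struct notifier_block example_locator_nb = {
	.notifier_call = example_locator_cb,
};

static int example_request_location(void)
{
	/* The lookup is asynchronous; 0 only means the request was queued. */
	return get_service_location("example_client", "avs/audio",
				    &example_locator_nb);
}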
diff --git a/drivers/soc/qcom/service-notifier-private.h b/drivers/soc/qcom/service-notifier-private.h
new file mode 100644
index 0000000..fa5e578
--- /dev/null
+++ b/drivers/soc/qcom/service-notifier-private.h
@@ -0,0 +1,345 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SERVICE_REGISTRY_NOTIFIER_H
+#define SERVICE_REGISTRY_NOTIFIER_H
+
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define SERVREG_NOTIF_SERVICE_ID_V01 0x42
+#define SERVREG_NOTIF_SERVICE_VERS_V01 0x01
+
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01 0x0020
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01 0x0020
+#define QMI_SERVREG_NOTIF_QUERY_STATE_REQ_V01 0x0021
+#define QMI_SERVREG_NOTIF_QUERY_STATE_RESP_V01 0x0021
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_V01 0x0022
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_REQ_V01 0x0023
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_RESP_V01 0x0023
+#define QMI_SERVREG_NOTIF_RESTART_PD_REQ_V01 0x0024
+#define QMI_SERVREG_NOTIF_RESTART_PD_RESP_V01 0x0024
+
+#define QMI_SERVREG_NOTIF_NAME_LENGTH_V01 64
+
+struct qmi_servreg_notif_register_listener_req_msg_v01 {
+	uint8_t enable;
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+};
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN 71
+struct elem_info qmi_servreg_notif_register_listener_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_register_listener_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t curr_state_valid;
+	enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+};
+#define QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN 14
+struct elem_info qmi_servreg_notif_register_listener_resp_msg_v01_ei[];
+
+struct qmi_servreg_notif_query_state_req_msg_v01 {
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+};
+#define QMI_SERVREG_NOTIF_QUERY_STATE_REQ_MSG_V01_MAX_MSG_LEN 67
+struct elem_info qmi_servreg_notif_query_state_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_query_state_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t curr_state_valid;
+	enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+};
+#define QMI_SERVREG_NOTIF_QUERY_STATE_RESP_MSG_V01_MAX_MSG_LEN 14
+struct elem_info qmi_servreg_notif_query_state_resp_msg_v01_ei[];
+
+struct qmi_servreg_notif_state_updated_ind_msg_v01 {
+	enum qmi_servreg_notif_service_state_enum_type_v01 curr_state;
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+	uint16_t transaction_id;
+};
+#define QMI_SERVREG_NOTIF_STATE_UPDATED_IND_MSG_V01_MAX_MSG_LEN 79
+struct elem_info qmi_servreg_notif_state_updated_ind_msg_v01_ei[];
+
+struct qmi_servreg_notif_set_ack_req_msg_v01 {
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+	uint16_t transaction_id;
+};
+#define QMI_SERVREG_NOTIF_SET_ACK_REQ_MSG_V01_MAX_MSG_LEN 72
+struct elem_info qmi_servreg_notif_set_ack_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_set_ack_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_NOTIF_SET_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+struct elem_info qmi_servreg_notif_set_ack_resp_msg_v01_ei[];
+
+struct qmi_servreg_notif_restart_pd_req_msg_v01 {
+	char service_name[QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1];
+};
+#define QMI_SERVREG_NOTIF_RESTART_PD_REQ_MSG_V01_MAX_MSG_LEN 67
+extern struct elem_info qmi_servreg_notif_restart_pd_req_msg_v01_ei[];
+
+struct qmi_servreg_notif_restart_pd_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define QMI_SERVREG_NOTIF_RESTART_PD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info qmi_servreg_notif_restart_pd_resp_msg_v01_ei[];
+
+struct elem_info qmi_servreg_notif_register_listener_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_register_listener_req_msg_v01,
+					   enable),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_register_listener_req_msg_v01,
+					   service_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_register_listener_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+			qmi_servreg_notif_register_listener_resp_msg_v01,
+									resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+			qmi_servreg_notif_register_listener_resp_msg_v01,
+							curr_state_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(
+			enum qmi_servreg_notif_service_state_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+			qmi_servreg_notif_register_listener_resp_msg_v01,
+								curr_state),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_query_state_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_query_state_req_msg_v01,
+								service_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_query_state_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_query_state_resp_msg_v01,
+									resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_query_state_resp_msg_v01,
+							curr_state_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum
+				qmi_servreg_notif_service_state_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_query_state_resp_msg_v01,
+								curr_state),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_state_updated_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum
+				qmi_servreg_notif_service_state_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_state_updated_ind_msg_v01,
+								curr_state),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_state_updated_ind_msg_v01,
+								service_name),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_state_updated_ind_msg_v01,
+								transaction_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_set_ack_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_set_ack_req_msg_v01,
+								service_name),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_set_ack_req_msg_v01,
+								transaction_id),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_set_ack_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_set_ack_resp_msg_v01,
+									resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_restart_pd_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = QMI_SERVREG_NOTIF_NAME_LENGTH_V01 + 1,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_restart_pd_req_msg_v01,
+								service_name),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_servreg_notif_restart_pd_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				qmi_servreg_notif_restart_pd_resp_msg_v01,
+								   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+#endif
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
new file mode 100644
index 0000000..dae3c42
--- /dev/null
+++ b/drivers/soc/qcom/service-notifier.c
@@ -0,0 +1,898 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "service-notifier: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/service-locator.h>
+#include <soc/qcom/service-notifier.h>
+#include "service-notifier-private.h"
+
+#define QMI_RESP_BIT_SHIFT(x)			(x << 16)
+#define SERVREG_NOTIF_NAME_LENGTH	QMI_SERVREG_NOTIF_NAME_LENGTH_V01
+#define SERVREG_NOTIF_SERVICE_ID	SERVREG_NOTIF_SERVICE_ID_V01
+#define SERVREG_NOTIF_SERVICE_VERS	SERVREG_NOTIF_SERVICE_VERS_V01
+
+#define SERVREG_NOTIF_SET_ACK_REQ		\
+			QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_REQ_V01
+#define SERVREG_NOTIF_SET_ACK_REQ_MSG_LEN	\
+			QMI_SERVREG_NOTIF_SET_ACK_REQ_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_SET_ACK_RESP		\
+			QMI_SERVREG_NOTIF_STATE_UPDATED_IND_ACK_RESP_V01
+#define SERVREG_NOTIF_SET_ACK_RESP_MSG_LEN	\
+			QMI_SERVREG_NOTIF_SET_ACK_RESP_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_STATE_UPDATED_IND_MSG	\
+			QMI_SERVREG_NOTIF_STATE_UPDATED_IND_V01
+#define SERVREG_NOTIF_STATE_UPDATED_IND_MSG_LEN	\
+			QMI_SERVREG_NOTIF_STATE_UPDATED_IND_MSG_V01_MAX_MSG_LEN
+
+#define SERVREG_NOTIF_REGISTER_LISTENER_REQ	\
+			QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_V01
+#define SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_LEN \
+		QMI_SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_V01_MAX_MSG_LEN
+#define SERVREG_NOTIF_REGISTER_LISTENER_RESP	\
+			QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_V01
+#define SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_LEN \
+		QMI_SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_V01_MAX_MSG_LEN
+
+#define QMI_STATE_MIN_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01
+#define QMI_STATE_MAX_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01
+
+#define SERVER_TIMEOUT				500
+#define MAX_STRING_LEN				100
+
+/*
+ * Per user service data structure
+ * struct service_notif_info - notifier struct for each unique service path
+ * service_path - service provider path/location
+ * instance_id - service instance id specific to a subsystem
+ * service_notif_rcvr_list - list of clients interested in this service
+ *                           provider's notifications
+ * curr_state - Current state of the service
+ */
+struct service_notif_info {
+	char service_path[SERVREG_NOTIF_NAME_LENGTH];
+	int instance_id;
+	struct srcu_notifier_head service_notif_rcvr_list;
+	struct list_head list;
+	int curr_state;
+};
+static LIST_HEAD(service_list);
+static DEFINE_MUTEX(service_list_lock);
+
+struct ind_req_resp {
+	char service_path[SERVREG_NOTIF_NAME_LENGTH];
+	int transaction_id;
+};
+
+/*
+ * Per Root Process Domain (Root service) data structure
+ * struct qmi_client_info - QMI client info for each subsystem/instance id
+ * instance_id - service instance id specific to a subsystem (Root PD)
+ * clnt_handle - unique QMI client handle
+ * service_connected - indicates if QMI service is up on the subsystem
+ * ssr_handle - The SSR handle provided by the SSR driver for the subsystem
+ *		on which the remote root PD runs.
+ */
+struct qmi_client_info {
+	int instance_id;
+	struct work_struct svc_arrive;
+	struct work_struct svc_exit;
+	struct work_struct svc_rcv_msg;
+	struct work_struct ind_ack;
+	struct workqueue_struct *svc_event_wq;
+	struct qmi_handle *clnt_handle;
+	struct notifier_block notifier;
+	void *ssr_handle;
+	struct notifier_block ssr_notifier;
+	bool service_connected;
+	struct list_head list;
+	struct ind_req_resp ind_msg;
+};
+static LIST_HEAD(qmi_client_list);
+static DEFINE_MUTEX(qmi_list_lock);
+static DEFINE_MUTEX(qmi_client_release_lock);
+
+static DEFINE_MUTEX(notif_add_lock);
+
+static void root_service_clnt_recv_msg(struct work_struct *work);
+static void root_service_service_arrive(struct work_struct *work);
+static void root_service_exit_work(struct work_struct *work);
+
+static struct service_notif_info *_find_service_info(const char *service_path)
+{
+	struct service_notif_info *service_notif;
+
+	mutex_lock(&service_list_lock);
+	list_for_each_entry(service_notif, &service_list, list)
+		if (!strcmp(service_notif->service_path, service_path)) {
+			mutex_unlock(&service_list_lock);
+			return service_notif;
+		}
+	mutex_unlock(&service_list_lock);
+	return NULL;
+}
+
+static int service_notif_queue_notification(struct service_notif_info
+		*service_notif,
+		enum qmi_servreg_notif_service_state_enum_type_v01 notif_type,
+		void *info)
+{
+	int ret;
+
+	if (service_notif->curr_state == notif_type)
+		return 0;
+
+	ret = srcu_notifier_call_chain(&service_notif->service_notif_rcvr_list,
+							notif_type, info);
+	return ret;
+}
+
+static void root_service_clnt_recv_msg(struct work_struct *work)
+{
+	int ret;
+	struct qmi_client_info *data = container_of(work,
+					struct qmi_client_info, svc_rcv_msg);
+
+	do {
+		pr_debug("Polling for QMI recv msg(instance-id: %d)\n",
+							data->instance_id);
+	} while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
+
+	pr_debug("Notified about a Receive event (instance-id: %d)\n",
+							data->instance_id);
+}
+
+static void root_service_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	struct qmi_client_info *data = container_of(notify_priv,
+					struct qmi_client_info, svc_arrive);
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&data->svc_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
+static void send_ind_ack(struct work_struct *work)
+{
+	struct qmi_client_info *data = container_of(work,
+					struct qmi_client_info, ind_ack);
+	struct qmi_servreg_notif_set_ack_req_msg_v01 req;
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_servreg_notif_set_ack_resp_msg_v01 resp = { { 0, 0 } };
+	int rc;
+
+	req.transaction_id = data->ind_msg.transaction_id;
+	snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
+						data->ind_msg.service_path);
+
+	req_desc.msg_id = SERVREG_NOTIF_SET_ACK_REQ;
+	req_desc.max_msg_len = SERVREG_NOTIF_SET_ACK_REQ_MSG_LEN;
+	req_desc.ei_array = qmi_servreg_notif_set_ack_req_msg_v01_ei;
+
+	resp_desc.msg_id = SERVREG_NOTIF_SET_ACK_RESP;
+	resp_desc.max_msg_len = SERVREG_NOTIF_SET_ACK_RESP_MSG_LEN;
+	resp_desc.ei_array = qmi_servreg_notif_set_ack_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(data->clnt_handle, &req_desc,
+				&req, sizeof(req), &resp_desc, &resp,
+				sizeof(resp), SERVER_TIMEOUT);
+	if (rc < 0) {
+		pr_err("%s: Sending Ack failed/server timeout, ret - %d\n",
+						data->ind_msg.service_path, rc);
+		return;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01)
+		pr_err("QMI request failed 0x%x\n",
+			QMI_RESP_BIT_SHIFT(resp.resp.error));
+	pr_debug("Indication ACKed for transid %d, service %s, instance %d!\n",
+		data->ind_msg.transaction_id, data->ind_msg.service_path,
+		data->instance_id);
+}
+
+static void root_service_service_ind_cb(struct qmi_handle *handle,
+				unsigned int msg_id, void *msg,
+				unsigned int msg_len, void *ind_cb_priv)
+{
+	struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv;
+	struct service_notif_info *service_notif;
+	struct msg_desc ind_desc;
+	struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg = {
+					QMI_STATE_MIN_VAL, "", 0xFFFF };
+	int rc;
+
+	ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG;
+	ind_desc.max_msg_len = SERVREG_NOTIF_STATE_UPDATED_IND_MSG_LEN;
+	ind_desc.ei_array = qmi_servreg_notif_state_updated_ind_msg_v01_ei;
+	rc = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+	if (rc < 0) {
+		pr_err("Failed to decode message rc:%d\n", rc);
+		return;
+	}
+
+	pr_debug("Indication received from %s, state: 0x%x, trans-id: %d\n",
+		ind_msg.service_name, ind_msg.curr_state,
+		ind_msg.transaction_id);
+
+	service_notif = _find_service_info(ind_msg.service_name);
+	if (!service_notif)
+		return;
+
+	if ((int)ind_msg.curr_state < QMI_STATE_MIN_VAL ||
+			(int)ind_msg.curr_state > QMI_STATE_MAX_VAL)
+		pr_err("Unexpected indication notification state %d\n",
+							ind_msg.curr_state);
+	else {
+		mutex_lock(&notif_add_lock);
+		mutex_lock(&service_list_lock);
+		rc = service_notif_queue_notification(service_notif,
+						ind_msg.curr_state, NULL);
+		if (rc & NOTIFY_STOP_MASK)
+			pr_err("Notifier callback aborted for %s with error %d\n",
+						ind_msg.service_name, rc);
+		service_notif->curr_state = ind_msg.curr_state;
+		mutex_unlock(&service_list_lock);
+		mutex_unlock(&notif_add_lock);
+	}
+	data->ind_msg.transaction_id = ind_msg.transaction_id;
+	snprintf(data->ind_msg.service_path,
+		ARRAY_SIZE(data->ind_msg.service_path), "%s",
+		ind_msg.service_name);
+	schedule_work(&data->ind_ack);
+}
+
+static int send_notif_listener_msg_req(struct service_notif_info *service_notif,
+					struct qmi_client_info *data,
+					bool register_notif, int *curr_state)
+{
+	struct qmi_servreg_notif_register_listener_req_msg_v01 req;
+	struct qmi_servreg_notif_register_listener_resp_msg_v01
+						resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
+						service_notif->service_path);
+	req.enable = register_notif;
+
+	req_desc.msg_id = SERVREG_NOTIF_REGISTER_LISTENER_REQ;
+	req_desc.max_msg_len = SERVREG_NOTIF_REGISTER_LISTENER_REQ_MSG_LEN;
+	req_desc.ei_array = qmi_servreg_notif_register_listener_req_msg_v01_ei;
+
+	resp_desc.msg_id = SERVREG_NOTIF_REGISTER_LISTENER_RESP;
+	resp_desc.max_msg_len = SERVREG_NOTIF_REGISTER_LISTENER_RESP_MSG_LEN;
+	resp_desc.ei_array =
+			qmi_servreg_notif_register_listener_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(data->clnt_handle, &req_desc, &req, sizeof(req),
+				&resp_desc, &resp, sizeof(resp),
+				SERVER_TIMEOUT);
+	if (rc < 0) {
+		pr_err("%s: Message sending failed/server timeout, ret - %d\n",
+					service_notif->service_path, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		return -EREMOTEIO;
+	}
+
+	if ((int) resp.curr_state < QMI_STATE_MIN_VAL ||
+				(int) resp.curr_state > QMI_STATE_MAX_VAL) {
+		pr_err("Invalid indication notification state %d\n",
+							resp.curr_state);
+		rc = -EINVAL;
+	}
+	*curr_state = resp.curr_state;
+	return rc;
+}
+
+static int register_notif_listener(struct service_notif_info *service_notif,
+					struct qmi_client_info *data,
+					int *curr_state)
+{
+	return send_notif_listener_msg_req(service_notif, data, true,
+								curr_state);
+}
+
+static void root_service_service_arrive(struct work_struct *work)
+{
+	struct service_notif_info *service_notif = NULL;
+	struct qmi_client_info *data = container_of(work,
+					struct qmi_client_info, svc_arrive);
+	int rc;
+	int curr_state;
+
+	/* Create a Local client port for QMI communication */
+	data->clnt_handle = qmi_handle_create(root_service_clnt_notify, work);
+	if (!data->clnt_handle) {
+		pr_err("QMI client handle alloc failed (instance-id: %d)\n",
+							data->instance_id);
+		return;
+	}
+
+	/* Connect to the service on the root PD service */
+	rc = qmi_connect_to_service(data->clnt_handle,
+			SERVREG_NOTIF_SERVICE_ID, SERVREG_NOTIF_SERVICE_VERS,
+			data->instance_id);
+	if (rc < 0) {
+		pr_err("Could not connect to service(instance-id: %d) rc:%d\n",
+							data->instance_id, rc);
+		qmi_handle_destroy(data->clnt_handle);
+		data->clnt_handle = NULL;
+		return;
+	}
+	data->service_connected = true;
+	pr_info("Connection established between QMI handle and %d service\n",
+							data->instance_id);
+	/* Register for indication messages about service */
+	rc = qmi_register_ind_cb(data->clnt_handle, root_service_service_ind_cb,
+							(void *)data);
+	if (rc < 0)
+		pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
+							data->instance_id, rc);
+
+	mutex_lock(&notif_add_lock);
+	mutex_lock(&service_list_lock);
+	list_for_each_entry(service_notif, &service_list, list) {
+		if (service_notif->instance_id == data->instance_id) {
+			rc = register_notif_listener(service_notif, data,
+								&curr_state);
+			if (rc) {
+				pr_err("Notifier registration failed for %s rc:%d\n",
+					service_notif->service_path, rc);
+			} else {
+				rc = service_notif_queue_notification(
+					service_notif, curr_state, NULL);
+				if (rc & NOTIFY_STOP_MASK)
+					pr_err("Notifier callback aborted for %s error:%d\n",
+					service_notif->service_path, rc);
+				service_notif->curr_state = curr_state;
+			}
+		}
+	}
+	mutex_unlock(&service_list_lock);
+	mutex_unlock(&notif_add_lock);
+}
+
+static void root_service_service_exit(struct qmi_client_info *data,
+					enum pd_subsys_state state)
+{
+	struct service_notif_info *service_notif = NULL;
+	int rc;
+
+	/*
+	 * Send service down notifications to all clients
+	 * registered for notifications for that service.
+	 */
+	mutex_lock(&notif_add_lock);
+	mutex_lock(&service_list_lock);
+	list_for_each_entry(service_notif, &service_list, list) {
+		if (service_notif->instance_id == data->instance_id) {
+			rc = service_notif_queue_notification(service_notif,
+					SERVREG_NOTIF_SERVICE_STATE_DOWN_V01,
+					&state);
+			if (rc & NOTIFY_STOP_MASK)
+				pr_err("Notifier callback aborted for %s with error %d\n",
+					service_notif->service_path, rc);
+			service_notif->curr_state =
+					SERVREG_NOTIF_SERVICE_STATE_DOWN_V01;
+		}
+	}
+	mutex_unlock(&service_list_lock);
+	mutex_unlock(&notif_add_lock);
+
+	/*
+	 * Destroy client handle and try connecting when
+	 * service comes up again.
+	 */
+	mutex_lock(&qmi_client_release_lock);
+	data->service_connected = false;
+	qmi_handle_destroy(data->clnt_handle);
+	data->clnt_handle = NULL;
+	mutex_unlock(&qmi_client_release_lock);
+}
+
+static void root_service_exit_work(struct work_struct *work)
+{
+	struct qmi_client_info *data = container_of(work,
+					struct qmi_client_info, svc_exit);
+	root_service_service_exit(data, UNKNOWN);
+}
+
+static int service_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	struct qmi_client_info *data = container_of(this,
+					struct qmi_client_info, notifier);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		pr_debug("Root PD service UP\n");
+		queue_work(data->svc_event_wq, &data->svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		pr_debug("Root PD service DOWN\n");
+		queue_work(data->svc_event_wq, &data->svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int ssr_event_notify(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	struct qmi_client_info *info = container_of(this,
+					struct qmi_client_info, ssr_notifier);
+	struct notif_data *notif = data;
+
+	switch (code) {
+	case	SUBSYS_BEFORE_SHUTDOWN:
+		pr_debug("Root PD DOWN(SSR notification), crashed?%d\n",
+						notif->crashed);
+		if (notif->crashed)
+			root_service_service_exit(info, CRASHED);
+		else
+			root_service_service_exit(info, SHUTDOWN);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static void *add_service_notif(const char *service_path, int instance_id,
+							int *curr_state)
+{
+	struct service_notif_info *service_notif;
+	struct qmi_client_info *tmp, *qmi_data;
+	long int rc;
+	char subsys[SERVREG_NOTIF_NAME_LENGTH];
+
+	rc = find_subsys(service_path, subsys);
+	if (rc < 0) {
+		pr_err("Could not find subsys for %s\n", service_path);
+		return ERR_PTR(rc);
+	}
+
+	service_notif = kzalloc(sizeof(struct service_notif_info), GFP_KERNEL);
+	if (!service_notif)
+		return ERR_PTR(-ENOMEM);
+
+	strlcpy(service_notif->service_path, service_path,
+		ARRAY_SIZE(service_notif->service_path));
+	service_notif->instance_id = instance_id;
+
+	/*
+	 * If we already have a connection to the root PD that hosts the
+	 * remote service we want notifications about, reuse the existing
+	 * QMI connection.
+	 */
+	mutex_lock(&qmi_list_lock);
+	list_for_each_entry(tmp, &qmi_client_list, list) {
+		if (tmp->instance_id == instance_id) {
+			if (tmp->service_connected) {
+				rc = register_notif_listener(service_notif, tmp,
+								curr_state);
+				if (rc) {
+					mutex_unlock(&qmi_list_lock);
+					pr_err("Register notifier failed: %s",
+						service_path);
+					kfree(service_notif);
+					return ERR_PTR(rc);
+				}
+				service_notif->curr_state = *curr_state;
+			}
+			mutex_unlock(&qmi_list_lock);
+			goto add_service_list;
+		}
+	}
+	mutex_unlock(&qmi_list_lock);
+
+	qmi_data = kzalloc(sizeof(struct qmi_client_info), GFP_KERNEL);
+	if (!qmi_data) {
+		kfree(service_notif);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	qmi_data->instance_id = instance_id;
+	qmi_data->clnt_handle = NULL;
+	qmi_data->notifier.notifier_call = service_event_notify;
+
+	qmi_data->svc_event_wq = create_singlethread_workqueue(subsys);
+	if (!qmi_data->svc_event_wq) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	INIT_WORK(&qmi_data->svc_arrive, root_service_service_arrive);
+	INIT_WORK(&qmi_data->svc_exit, root_service_exit_work);
+	INIT_WORK(&qmi_data->svc_rcv_msg, root_service_clnt_recv_msg);
+	INIT_WORK(&qmi_data->ind_ack, send_ind_ack);
+
+	*curr_state = service_notif->curr_state =
+				SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01;
+
+	rc = qmi_svc_event_notifier_register(SERVREG_NOTIF_SERVICE_ID,
+			SERVREG_NOTIF_SERVICE_VERS, qmi_data->instance_id,
+			&qmi_data->notifier);
+	if (rc < 0) {
+		pr_err("Notifier register failed (instance-id: %d)\n",
+							qmi_data->instance_id);
+		goto exit;
+	}
+	qmi_data->ssr_notifier.notifier_call = ssr_event_notify;
+	qmi_data->ssr_handle = subsys_notif_register_notifier(subsys,
+						&qmi_data->ssr_notifier);
+	if (IS_ERR(qmi_data->ssr_handle)) {
+		pr_err("SSR notif register for %s failed(instance-id: %d)\n",
+			subsys, qmi_data->instance_id);
+		rc = PTR_ERR(qmi_data->ssr_handle);
+		goto exit;
+	}
+
+	mutex_lock(&qmi_list_lock);
+	INIT_LIST_HEAD(&qmi_data->list);
+	list_add_tail(&qmi_data->list, &qmi_client_list);
+	mutex_unlock(&qmi_list_lock);
+
+add_service_list:
+	srcu_init_notifier_head(&service_notif->service_notif_rcvr_list);
+
+	mutex_lock(&service_list_lock);
+	INIT_LIST_HEAD(&service_notif->list);
+	list_add_tail(&service_notif->list, &service_list);
+	mutex_unlock(&service_list_lock);
+
+	return service_notif;
+exit:
+	if (qmi_data->svc_event_wq)
+		destroy_workqueue(qmi_data->svc_event_wq);
+	kfree(qmi_data);
+	kfree(service_notif);
+	return ERR_PTR(rc);
+}
+
+static int send_pd_restart_req(const char *service_path,
+				struct qmi_client_info *data)
+{
+	struct qmi_servreg_notif_restart_pd_req_msg_v01 req;
+	struct qmi_servreg_notif_register_listener_resp_msg_v01
+						resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
+							service_path);
+
+	req_desc.msg_id = QMI_SERVREG_NOTIF_RESTART_PD_REQ_V01;
+	req_desc.max_msg_len =
+		QMI_SERVREG_NOTIF_RESTART_PD_REQ_MSG_V01_MAX_MSG_LEN;
+	req_desc.ei_array = qmi_servreg_notif_restart_pd_req_msg_v01_ei;
+
+	resp_desc.msg_id = QMI_SERVREG_NOTIF_RESTART_PD_RESP_V01;
+	resp_desc.max_msg_len =
+		QMI_SERVREG_NOTIF_RESTART_PD_RESP_MSG_V01_MAX_MSG_LEN;
+	resp_desc.ei_array = qmi_servreg_notif_restart_pd_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+			sizeof(req), &resp_desc, &resp, sizeof(resp),
+			SERVER_TIMEOUT);
+	if (rc < 0) {
+		pr_err("%s: Message sending failed/server timeout, ret - %d\n",
+							service_path, rc);
+		return rc;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request for PD restart failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		return -EREMOTEIO;
+	}
+
+	return rc;
+
+}
+
+/* service_notif_pd_restart() - Request PD restart
+ * @service_path: Individual service identifier path for which restart is
+ *		being requested.
+ * @instance_id: Instance id specific to a subsystem.
+ *
+ * @return: >=0 on success, standard Linux error codes on failure.
+ */
+int service_notif_pd_restart(const char *service_path, int instance_id)
+{
+	struct qmi_client_info *tmp;
+	int rc = 0;
+
+	list_for_each_entry(tmp, &qmi_client_list, list) {
+		if (tmp->instance_id == instance_id) {
+			if (tmp->service_connected) {
+				pr_info("Restarting service %s, instance-id %d\n",
+						service_path, instance_id);
+				rc = send_pd_restart_req(service_path, tmp);
+			} else
+				pr_info("Service %s is not connected\n",
+							service_path);
+		}
+	}
+	return rc;
+}
+EXPORT_SYMBOL(service_notif_pd_restart);
+
+/* service_notif_register_notifier() - Register a notifier for a service
+ * On success, it returns a handle. It takes the following arguments:
+ * service_path: Individual service identifier path for which a client
+ *		registers for notifications.
+ * instance_id: Instance id specific to a subsystem.
+ * curr_state: Current state of the service returned by the registration
+ *		 process.
+ * notifier block: notifier callback for service events.
+ */
+void *service_notif_register_notifier(const char *service_path, int instance_id,
+				struct notifier_block *nb, int *curr_state)
+{
+	struct service_notif_info *service_notif;
+	int ret = 0;
+
+	if (!service_path || !instance_id || !nb)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&notif_add_lock);
+	service_notif = _find_service_info(service_path);
+	if (!service_notif) {
+		service_notif = (struct service_notif_info *)add_service_notif(
+								service_path,
+								instance_id,
+								curr_state);
+		if (IS_ERR(service_notif))
+			goto exit;
+	}
+
+	ret = srcu_notifier_chain_register(
+				&service_notif->service_notif_rcvr_list, nb);
+	*curr_state = service_notif->curr_state;
+	if (ret < 0)
+		service_notif = ERR_PTR(ret);
+exit:
+	mutex_unlock(&notif_add_lock);
+	return service_notif;
+}
+EXPORT_SYMBOL(service_notif_register_notifier);
+
+/* service_notif_unregister_notifier() - Unregister a notifier for a service.
+ * service_notif_handle - The notifier handle that was provided by the
+ *			  service_notif_register_notifier function when the
+ *			  client registered for notifications.
+ * nb - The notifier block that was previously used during the registration.
+ */
+int service_notif_unregister_notifier(void *service_notif_handle,
+					struct notifier_block *nb)
+{
+	struct service_notif_info *service_notif;
+
+	if (!service_notif_handle || !nb)
+		return -EINVAL;
+
+	service_notif = (struct service_notif_info *)service_notif_handle;
+	if (IS_ERR(service_notif))
+		return -EINVAL;
+
+	return srcu_notifier_chain_unregister(
+				&service_notif->service_notif_rcvr_list, nb);
+}
+EXPORT_SYMBOL(service_notif_unregister_notifier);
+
+struct service_notifier_test_data {
+	char service_path[MAX_STRING_LEN];
+	int instance_id;
+	struct notifier_block nb;
+	void *service_notif_handle;
+};
+
+static struct service_notifier_test_data test_data;
+
+static void print_service_provider_state(int notification, char *type)
+{
+	if (notification == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01)
+		pr_info("%s: Service %s down!\n", type, test_data.service_path);
+	else if (notification == SERVREG_NOTIF_SERVICE_STATE_UP_V01)
+		pr_info("%s: Service %s up!\n", type, test_data.service_path);
+	else if (notification == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01)
+		pr_info("%s: Service %s state uninit!\n", type,
+						test_data.service_path);
+	else
+		pr_info("%s: Service %s state Unknown 0x%x!\n", type,
+					test_data.service_path, notification);
+}
+
+static int nb_callback(struct notifier_block *nb,
+				  unsigned long notification,
+				  void *data)
+{
+	print_service_provider_state((int)notification, "Notification");
+	return 0;
+}
+
+static int show_service_path(struct seq_file *f, void *unused)
+{
+	if (test_data.service_notif_handle)
+		seq_printf(f, "Service Path: %s\n", test_data.service_path);
+	else
+		seq_puts(f, "No existing notifier\n");
+	return 0;
+}
+
+
+static ssize_t set_service_notifier_register(struct file *fp,
+						const char __user *buf,
+						size_t count, loff_t *ppos)
+{
+	int curr_state = INT_MAX, rc;
+
+	if (!buf)
+		return -EIO;
+	if (test_data.service_notif_handle) {
+		service_notif_unregister_notifier(
+						test_data.service_notif_handle,
+						&test_data.nb);
+		test_data.service_notif_handle = NULL;
+		pr_info("Unregistering existing notifier for %s\n",
+							test_data.service_path);
+	}
+	rc = simple_write_to_buffer(test_data.service_path, MAX_STRING_LEN,
+							ppos, buf, count - 1);
+	if (rc != count - 1) {
+		pr_err("Unable to read data into kernel buffer\n");
+		goto err;
+	}
+	test_data.nb.notifier_call = nb_callback;
+	test_data.service_notif_handle = service_notif_register_notifier(
+					test_data.service_path,
+					test_data.instance_id, &test_data.nb,
+					&curr_state);
+	if (!IS_ERR(test_data.service_notif_handle)) {
+		pr_info("Notifier Registered for service %s\n",
+						test_data.service_path);
+		print_service_provider_state(curr_state, "Initial State");
+		return count;
+	}
+err:
+	test_data.service_notif_handle = NULL;
+	pr_err("Unable to register notifier for %s\n", test_data.service_path);
+	return -EIO;
+}
+
+static int open_service_notifier_register(struct inode *inode, struct file *f)
+{
+	return single_open(f, (void *) show_service_path,
+							inode->i_private);
+}
+
+static const struct file_operations service_notifier_register_fops = {
+	.open = open_service_notifier_register,
+	.read = seq_read,
+	.write = set_service_notifier_register,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static int show_service_notifier_id(struct seq_file *f, void *unused)
+{
+	seq_printf(f, "Service instance ID: %d\n", test_data.instance_id);
+	return 0;
+}
+
+static ssize_t set_service_notifier_id(struct file *fp,
+						const char __user *buf,
+						size_t count, loff_t *unused)
+{
+	int val, rc;
+	char kbuf[MAX_STRING_LEN];
+
+	if (count > MAX_STRING_LEN) {
+		rc = -EIO;
+		goto err;
+	}
+	rc = copy_from_user(kbuf, buf, count);
+	if (rc != 0) {
+		rc = -EFAULT;
+		goto err;
+	}
+
+	kbuf[count - 1] = '\0';
+	rc = kstrtoint(kbuf, 0, &val);
+	if (rc < 0)
+		goto err;
+
+	test_data.instance_id = val;
+	return count;
+err:
+	pr_err("Invalid input parameters: rc = %d\n", rc);
+	return rc;
+}
+
+static int open_service_notifier_id(struct inode *inode, struct file *f)
+{
+	return single_open(f, (void *) show_service_notifier_id,
+							inode->i_private);
+}
+
+static const struct file_operations service_notifier_id_fops = {
+	.open = open_service_notifier_id,
+	.read = seq_read,
+	.write = set_service_notifier_id,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static struct dentry *service_notifier_dir;
+static struct dentry *service_path_file;
+static struct dentry *service_id_file;
+
+static int __init service_notifier_init(void)
+{
+	service_notifier_dir = debugfs_create_dir("service_notifier", NULL);
+	if (service_notifier_dir) {
+		service_path_file = debugfs_create_file("service_path",
+				0644, service_notifier_dir, NULL,
+				&service_notifier_register_fops);
+		if (!service_path_file)
+			goto err;
+		service_id_file = debugfs_create_file("service_id",
+				0644, service_notifier_dir, NULL,
+				&service_notifier_id_fops);
+		if (!service_id_file)
+			goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(service_notifier_dir);
+	return 0;
+}
+
+static void __exit service_notifier_exit(void)
+{
+	debugfs_remove_recursive(service_notifier_dir);
+	test_data.nb.notifier_call = nb_callback;
+}
+module_init(service_notifier_init);
+module_exit(service_notifier_exit);
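Again for reference only, a sketch of a client that ties the locator and notifier pieces together. In practice service_path and instance_id come from a service-locator callback (domain_list[i].name / .instance_id in the earlier sketch); nothing here is hard-coded by this patch.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <soc/qcom/service-notifier.h>

static void *example_notif_handle;

static int example_service_cb(struct notifier_block *nb,
			      unsigned long state, void *data)
{
	switch (state) {
	case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
		/* For SSR-driven DOWNs, data is an enum pd_subsys_state *. */
		pr_info("example: service went down\n");
		break;
	case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
		pr_info("example: service is up\n");
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_service_nb = {
	.notifier_call = example_service_cb,
};

static int example_track_service(const char *service_path, int instance_id)
{
	int curr_state;

	example_notif_handle = service_notif_register_notifier(service_path,
					instance_id, &example_service_nb,
					&curr_state);
	if (IS_ERR(example_notif_handle))
		return PTR_ERR(example_notif_handle);

	/* curr_state reflects the state reported at registration time. */
	pr_info("example: initial state 0x%x\n", curr_state);
	return 0;
}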
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index c6e288e..cb91789 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1697,7 +1697,6 @@
 	if (ret) {
 		subsys_debugfs_remove(subsys);
 		put_device(&subsys->dev);
-		kfree(subsys);
 		return ERR_PTR(ret);
 	}
 
@@ -1759,7 +1758,6 @@
 err_register:
 	subsys_debugfs_remove(subsys);
 	device_unregister(&subsys->dev);
-	kfree(subsys);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(subsys_register);
diff --git a/include/soc/qcom/service-locator.h b/include/soc/qcom/service-locator.h
new file mode 100644
index 0000000..7fa25b9
--- /dev/null
+++ b/include/soc/qcom/service-locator.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Process Domain Service Locator API header
+ */
+
+#ifndef _SERVICE_LOCATOR_H
+#define _SERVICE_LOCATOR_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+#define QMI_SERVREG_LOC_NAME_LENGTH_V01 64
+#define QMI_SERVREG_LOC_LIST_LENGTH_V01 32
+
+/*
+ * @name: The full process domain path for a process domain which provides
+ *	  a particular service
+ * @instance_id: The QMI instance id corresponding to the root process
+ *		 domain which is responsible for notifications for this
+ *		 process domain
+ * @service_data_valid: Indicates if service_data field has valid data
+ * @service_data: Optional service data provided by the service locator
+ */
+struct servreg_loc_entry_v01 {
+	char name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	uint32_t instance_id;
+	uint8_t service_data_valid;
+	uint32_t service_data;
+};
+
+/*
+ * @client_name:   Name of the client calling the api
+ * @service_name:  Name of the service for which the list of process domains
+ *		   is requested
+ * @total_domains: Length of the process domain list
+ * @db_rev_count:  Process domain list database revision number
+ * @domain_list:   List of process domains providing the service
+ */
+struct pd_qmi_client_data {
+	char client_name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	char service_name[QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1];
+	int total_domains;
+	int db_rev_count;
+	struct servreg_loc_entry_v01 *domain_list;
+};
+
+enum service_locator_state {
+	LOCATOR_DOWN = 0x0F,
+	LOCATOR_UP = 0x1F,
+};
+
+#if defined(CONFIG_MSM_SERVICE_LOCATOR)
+/*
+ * Use this API to request information about the process domains on
+ * which a particular service runs. The caller provides the client name,
+ * the service name and a notifier block pointer. The total domains,
+ * db revision and the domain list are delivered to the notifier
+ * callback by the service locator.
+ * Returns 0 if the request was accepted; otherwise a negative error code.
+ */
+int get_service_location(char *client_name, char *service_name,
+		struct notifier_block *locator_nb);
+
+/*
+ * Use this api to request information regarding the subsystem the process
+ * domain runs on.
+ * @pd_path: The name field from inside the servreg_loc_entry that one
+ *	     gets back using the get_service_location API.
+ * Returns 0 on success; otherwise a value < 0 if no valid subsystem is found.
+ */
+int find_subsys(const char *pd_path, char *subsys);
+
+#else
+
+static inline int get_service_location(char *client_name,
+		char *service_name, struct notifier_block *locator_nb)
+{
+	return -ENODEV;
+}
+
+static inline int find_subsys(const char *pd_path, char *subsys)
+{
+	return 0;
+}
+
+#endif /* CONFIG_MSM_SERVICE_LOCATOR */
+
+#endif
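Reusing the includes from the locator sketch above, a quick illustration of what find_subsys() extracts: the component between the first and second '/' of a process domain path. The path literal is only illustrative; real paths come from the locator's domain list.

static void example_parse_path(void)
{
	char subsys[QMI_SERVREG_LOC_NAME_LENGTH_V01];

	/* Illustrative path; real paths come from the locator database. */
	if (!find_subsys("msm/adsp/audio_pd", subsys))
		pr_info("subsys = %s\n", subsys);	/* logs "adsp" */
}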
diff --git a/include/soc/qcom/service-notifier.h b/include/soc/qcom/service-notifier.h
new file mode 100644
index 0000000..0106801
--- /dev/null
+++ b/include/soc/qcom/service-notifier.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Process Domain Service Notifier API header
+ */
+
+#ifndef _SERVICE_NOTIFIER_H
+#define _SERVICE_NOTIFIER_H
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+enum qmi_servreg_notif_service_state_enum_type_v01 {
+	QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+	QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+	SERVREG_NOTIF_SERVICE_STATE_DOWN_V01 = 0x0FFFFFFF,
+	SERVREG_NOTIF_SERVICE_STATE_UP_V01 = 0x1FFFFFFF,
+	SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01 = 0x7FFFFFFF,
+};
+
+enum pd_subsys_state {
+	CRASHED,
+	SHUTDOWN,
+	UNKNOWN,
+};
+#if defined(CONFIG_MSM_SERVICE_NOTIFIER)
+
+/* service_notif_register_notifier() - Register a notifier for a service
+ * On success, it returns a handle. It takes the following arguments:
+ * service_path: Individual service identifier path for which a client
+ *		registers for notifications.
+ * instance_id: Instance id specific to a subsystem.
+ * curr_state: Current state of the service returned by the registration
+ *		 process.
+ * notifier block: notifier callback for service events.
+ */
+void *service_notif_register_notifier(const char *service_path, int instance_id,
+				struct notifier_block *nb, int *curr_state);
+
+/* service_notif_unregister_notifier() - Unregister a notifier for a service.
+ * service_notif_handle - The notifier handle that was provided by the
+ *			  service_notif_register_notifier function when the
+ *			  client registered for notifications.
+ * nb - The notifier block that was previously used during the registration.
+ */
+int service_notif_unregister_notifier(void *service_notif_handle,
+					struct notifier_block *nb);
+
+int service_notif_pd_restart(const char *service_path, int instance_id);
+
+#else
+
+static inline void *service_notif_register_notifier(const char *service_path,
+				int instance_id, struct notifier_block *nb,
+				int *curr_state)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int service_notif_unregister_notifier(void *service_notif_handle,
+					struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int service_notif_pd_restart(const char *service_path,
+						int instance_id)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_SERVICE_NOTIFIER */
+
+#endif
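service_notif_pd_restart() is the only call here that actively changes remote state, so a short, hedged sketch of a recovery path that uses it; the caller is assumed to already know the path and instance id (e.g. from the locator).

#include <linux/kernel.h>
#include <soc/qcom/service-notifier.h>

/*
 * Ask the root PD to restart a single process domain, for example after
 * the client detects a fatal error in that service. A negative return
 * means the QMI request could not be sent or was rejected.
 */
static int example_restart_pd(const char *service_path, int instance_id)
{
	int ret;

	ret = service_notif_pd_restart(service_path, instance_id);
	if (ret < 0)
		pr_err("example: restart of %s failed: %d\n",
		       service_path, ret);
	return ret;
}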
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 197f2c8..999c120 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -21,3 +21,4 @@
 header-y += vc4_drm.h
 header-y += virtgpu_drm.h
 header-y += sde_drm.h
+header-y += msm_drm_pp.h
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
new file mode 100644
index 0000000..a65dacff
--- /dev/null
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -0,0 +1,51 @@
+#ifndef _MSM_DRM_PP_H_
+#define _MSM_DRM_PP_H_
+
+#include <drm/drm.h>
+
+/**
+ * struct drm_msm_pcc_coeff - PCC coefficient structure for each color
+ *                            component.
+ * @c: constant coefficient.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ * @rg: red green coefficient.
+ * @gb: green blue coefficient.
+ * @rb: red blue coefficient.
+ * @rgb: red green blue coefficient.
+ */
+
+struct drm_msm_pcc_coeff {
+	__u32 c;
+	__u32 r;
+	__u32 g;
+	__u32 b;
+	__u32 rg;
+	__u32 gb;
+	__u32 rb;
+	__u32 rgb;
+};
+
+/**
+ * struct drm_msm_pcc - pcc feature structure
+ * @flags: for enable/disable, read/write or customize operations
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ */
+
+struct drm_msm_pcc {
+	__u64 flags;
+	struct drm_msm_pcc_coeff r;
+	struct drm_msm_pcc_coeff g;
+	struct drm_msm_pcc_coeff b;
+};
+
+#define PA_VLUT_SIZE 256
+struct drm_msm_pa_vlut {
+	__u64 flags;
+	__u32 val[PA_VLUT_SIZE];
+};
+
+#endif /* _MSM_DRM_PP_H_ */
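A hedged user-space sketch of filling the new PCC payload. The fixed-point format of the coefficients and the meaning of individual flag bits are driver/hardware defined and not specified by this header, and the DRM property plumbing that consumes the blob is not part of this patch, so the 'unity' value and the zeroed flags below are placeholders.

#include <string.h>
#include <drm/msm_drm_pp.h>

/* Populate a pass-through-style PCC block: each output channel is driven
 * only by its own input channel. 'unity' must be the hardware's 1.0 value
 * in whatever fixed-point format the driver expects (placeholder here).
 */
static void example_fill_pcc(struct drm_msm_pcc *pcc, __u32 unity)
{
	memset(pcc, 0, sizeof(*pcc));
	pcc->flags = 0;
	pcc->r.r = unity;
	pcc->g.g = unity;
	pcc->b.b = unity;
}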
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index bcd64c2..07a02c7 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -1,36 +1,6 @@
 #ifndef _SDE_DRM_H_
 #define _SDE_DRM_H_
 
-/*
- * Each top level structure is of the following format:
- *
- * struct {
- *         uint64_t version;
- *         union {
- *                 struct version v1;
- *                 ...
- *         } u;
- *
- * Each top level structure maintains independent versioning and is defined
- * as follows:
- *
- * #define STRUCTNAME_V1	0x1
- * ...
- * #define STRUCTNAME_Vn	0x###
- * #define STRUCTNAME_VERSION	STRUCTNAME_Vn
- *
- * Version fields should ALWAYS be declared as type uint64_t. This is because
- * 64-bit compilers tend to pad the structure to 64-bit align the start of
- * union structure members. Having an explicit 64-bit version helps to maintain
- * consistent structure layout between 32-bit and 64-bit compilers.
- *
- * Updates to the structures UAPI should always define a new sub-structure to
- * place within the union, and update STRUCTNAME_VERSION to reference the
- * new version number.
- *
- * User mode code should always set the 'version' field to STRUCTNAME_VERSION.
- */
-
 /* Total number of supported color planes */
 #define SDE_MAX_PLANES  4
 
@@ -105,25 +75,9 @@
 };
 
 /**
- * Enable mask bits for "scaler" property
- *
- * @SDE_DRM_SCALER_PIX_EXT: pix ext sub-structures are valid
- * @SDE_DRM_SCALER_SCALER_2: scaler 2 sub-structures are valid
- * @SDE_DRM_SCALER_SCALER_3: scaler 3 sub-structures are valid
- * @SDE_DRM_SCALER_DECIMATE: decimation fields are valid
- */
-#define SDE_DRM_SCALER_PIX_EXT      0x1
-#define SDE_DRM_SCALER_SCALER_2     0x2
-#define SDE_DRM_SCALER_SCALER_3     0x4
-#define SDE_DRM_SCALER_DECIMATE     0x8
-
-/**
  * struct sde_drm_scaler_v1 - version 1 of struct sde_drm_scaler
- * @enable:        Mask of SDE_DRM_SCALER_ bits
  * @lr:            Pixel extension settings for left/right
  * @tb:            Pixel extension settings for top/botton
- * @horz_decimate: Horizontal decimation factor
- * @vert_decimate: Vertical decimation factor
  * @init_phase_x:  Initial scaler phase values for x
  * @phase_step_x:  Phase step values for x
  * @init_phase_y:  Initial scaler phase values for y
@@ -133,23 +87,12 @@
  */
 struct sde_drm_scaler_v1 {
 	/*
-	 * General definitions
-	 */
-	uint32_t enable;
-
-	/*
 	 * Pix ext settings
 	 */
 	struct sde_drm_pix_ext_v1 lr;
 	struct sde_drm_pix_ext_v1 tb;
 
 	/*
-	 * Decimation settings
-	 */
-	uint32_t horz_decimate;
-	uint32_t vert_decimate;
-
-	/*
 	 * Phase settings
 	 */
 	int32_t init_phase_x[SDE_MAX_PLANES];
@@ -165,22 +108,6 @@
 	uint32_t vert_filter[SDE_MAX_PLANES];
 };
 
-/* Scaler version definition, see top of file for guidelines */
-#define SDE_DRM_SCALER_V1       0x1
-#define SDE_DRM_SCALER_VERSION  SDE_DRM_SCALER_V1
-
-/**
- * struct sde_drm_scaler - scaler structure
- * @version:    Structure version, set to SDE_DRM_SCALER_VERSION
- * @v1:         Version 1 of scaler structure
- */
-struct sde_drm_scaler {
-	uint64_t version;
-	union {
-		struct sde_drm_scaler_v1        v1;
-	};
-};
-
 /*
  * Define constants for struct sde_drm_csc
  */
@@ -188,10 +115,6 @@
 #define SDE_CSC_CLAMP_SIZE          6
 #define SDE_CSC_BIAS_SIZE           3
 
-/* CSC version definition, see top of file for guidelines */
-#define SDE_DRM_CSC_V1              0x1
-#define SDE_DRM_CSC_VERSION         SDE_DRM_CSC_V1
-
 /**
  * struct sde_drm_csc_v1 - version 1 of struct sde_drm_csc
  * @ctm_coeff:          Matrix coefficients, in S31.32 format
@@ -208,18 +131,6 @@
 	uint32_t post_clamp[SDE_CSC_CLAMP_SIZE];
 };
 
-/**
- * struct sde_drm_csc - CSC configuration structure
- * @version: Structure version, set to SDE_DRM_CSC_VERSION
- * @v1:      Version 1 of csc structure
- */
-struct sde_drm_csc {
-	uint64_t version;
-	union {
-		struct sde_drm_csc_v1   v1;
-	};
-};
-
 /* Writeback Config version definition */
 #define SDE_DRM_WB_CFG		0x1