Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 4.6 kernel.

  Overall the coolest thing here for me is the nouveau Maxwell signed
  firmware support from NVIDIA; it's taken a long while to extract this
  from them.

  I also wish the ARM vendors had just designed one set of display IP;
  ARM display block proliferation is definitely increasing.

  Core:
     - drm_event cleanups
     - Internal API cleanup making mode_fixup optional.
     - Apple GMUX vga_switcheroo support.
     - DP AUX testing interface

  Panel:
     - Refactoring of DSI core for use over more transports.

  New driver:
     - ARM hdlcd driver

  i915:
     - FBC/PSR (framebuffer compression, panel self refresh) enabled by default.
     - Ongoing atomic display support work
     - Ongoing runtime PM work
     - Pixel clock limit checks
     - VBT DSI description support
     - GEM fixes
     - GuC firmware scheduler enhancements

  amdkfd:
     - Deferred probing fixes to avoid depending on Makefile or link ordering.

  amdgpu/radeon:
     - ACP support for I2S audio.
     - Command Submission/GPU scheduler/GPUVM optimisations
     - Initial GPU reset support for amdgpu

  vmwgfx:
     - Support for DX10 gen mipmaps
     - Pageflipping and other fixes.

  exynos:
     - Exynos5420 SoC support for FIMD
     - Exynos5422 SoC support for MIPI-DSI

  nouveau:
     - GM20x secure boot support - adds acceleration for Maxwell GPUs.
     - GM200 support
     - GM20B clock driver support
     - Power sensors work

  etnaviv:
     - Correctness fixes for GPU cache flushing
     - Better support for i.MX6 systems.

  imx-drm:
     - VBlank IRQ support
     - Fence support
     - OF endpoint support

  msm:
     - HDMI support for 8996 (Snapdragon 820)
     - Adreno 430 support
     - Timestamp queries support

  virtio-gpu:
     - Fixes for Android support.

  rockchip:
     - Add support for Innosilicon HDMI

  rcar-du:
     - Support for 4 CRTCs
     - R8A7795 support
     - R-Car Gen3 support

  omapdrm:
     - HDMI interlace output support
     - dma-buf import support
     - Refactoring to remove a lot of legacy code.

  tilcdc:
     - Rewrite of pageflipping code
     - dma-buf support
     - pinctrl support

  vc4:
     - HDMI modesetting bug fixes
     - Significant 3D performance improvement.

  fsl-dcu (Freescale):
     - Lots of fixes

  tegra:
     - Two small fixes

  sti:
     - Atomic support for planes
     - Improved HDMI support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (1063 commits)
  drm/amdgpu: release_pages requires linux/pagemap.h
  drm/sti: restore mode_fixup callback
  drm/amdgpu/gfx7: add MTYPE definition
  drm/amdgpu: removing BO_VAs shouldn't be interruptible
  drm/amd/powerplay: show uvd/vce power gate enablement for tonga.
  drm/amd/powerplay: show uvd/vce power gate info for fiji
  drm/amdgpu: use sched fence if possible
  drm/amdgpu: move ib.fence to job.fence
  drm/amdgpu: give a fence param to ib_free
  drm/amdgpu: include the right version of gmc header files for iceland
  drm/radeon: fix indentation.
  drm/amd/powerplay: add uvd/vce dpm enabling flag to fix the performance issue for CZ
  drm/amdgpu: switch back to 32bit hw fences v2
  drm/amdgpu: remove amdgpu_fence_is_signaled
  drm/amdgpu: drop the extra fence range check v2
  drm/amdgpu: signal fences directly in amdgpu_fence_process
  drm/amdgpu: cleanup amdgpu_fence_wait_empty v2
  drm/amdgpu: keep all fences in an RCU protected array v2
  drm/amdgpu: add number of hardware submissions to amdgpu_fence_driver_init_ring
  drm/amdgpu: RCU protected amd_sched_fence_release
  ...
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index a866933..1692c4d 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -1816,7 +1816,7 @@
 	<td valign="top" >Description/Restrictions</td>
 	</tr>
 	<tr>
-	<td rowspan="37" valign="top" >DRM</td>
+	<td rowspan="42" valign="top" >DRM</td>
 	<td valign="top" >Generic</td>
 	<td valign="top" >“rotation”</td>
 	<td valign="top" >BITMASK</td>
@@ -2068,7 +2068,7 @@
 	<td valign="top" >property to suggest an Y offset for a connector</td>
 	</tr>
 	<tr>
-	<td rowspan="3" valign="top" >Optional</td>
+	<td rowspan="8" valign="top" >Optional</td>
 	<td valign="top" >“scaling mode”</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
@@ -2092,6 +2092,61 @@
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
+	<td valign="top" >“DEGAMMA_LUT”</td>
+	<td valign="top" >BLOB</td>
+	<td valign="top" >0</td>
+	<td valign="top" >CRTC</td>
+	<td valign="top" >DRM property to set the degamma lookup table
+		(LUT) mapping pixel data from the framebuffer before it is
+		given to the transformation matrix. The data is interpreted
+		as an array of struct drm_color_lut elements. Hardware might
+		choose not to use the full precision of the LUT elements nor
+		use all the elements of the LUT (for example the hardware
+		might choose to interpolate between LUT[0] and LUT[4]).</td>
+	</tr>
+	<tr>
+	<td valign="top" >“DEGAMMA_LUT_SIZE”</td>
+	<td valign="top" >RANGE | IMMUTABLE</td>
+	<td valign="top" >Min=0, Max=UINT_MAX</td>
+	<td valign="top" >CRTC</td>
+	<td valign="top" >DRM property that gives the size of the lookup
+		table to be set on the DEGAMMA_LUT property (the size depends
+		on the underlying hardware).</td>
+	</tr>
+	<tr>
+	<td valign="top" >“CTM”</td>
+	<td valign="top" >BLOB</td>
+	<td valign="top" >0</td>
+	<td valign="top" >CRTC</td>
+	<td valign="top" >DRM property to set the current
+		transformation matrix (CTM) applied to pixel data after the
+		lookup through the degamma LUT and before the lookup through
+		the gamma LUT. The data is interpreted as a struct
+		drm_color_ctm.</td>
+	</tr>
+	<tr>
+	<td valign="top" >“GAMMA_LUT”</td>
+	<td valign="top" >BLOB</td>
+	<td valign="top" >0</td>
+	<td valign="top" >CRTC</td>
+	<td valign="top" >DRM property to set the gamma lookup table
+		(LUT) mapping pixel data after the transformation matrix to
+		data sent to the connector. The data is interpreted as an
+		array of struct drm_color_lut elements. Hardware might choose
+		not to use the full precision of the LUT elements nor use all
+		the elements of the LUT (for example the hardware might choose
+		to interpolate between LUT[0] and LUT[4]).</td>
+	</tr>
+	<tr>
+	<td valign="top" >“GAMMA_LUT_SIZE”</td>
+	<td valign="top" >RANGE | IMMUTABLE</td>
+	<td valign="top" >Min=0, Max=UINT_MAX</td>
+	<td valign="top" >CRTC</td>
+	<td valign="top" >DRM property that gives the size of the lookup
+		table to be set on the GAMMA_LUT property (the size depends on
+		the underlying hardware).</td>
+	</tr>
+	<tr>
 	<td rowspan="20" valign="top" >i915</td>
 	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >"Broadcast RGB"</td>
@@ -2886,52 +2941,8 @@
     </sect2>
     <sect2>
       <title>File Operations</title>
-      <synopsis>const struct file_operations *fops</synopsis>
-      <abstract>File operations for the DRM device node.</abstract>
-      <para>
-        Drivers must define the file operations structure that forms the DRM
-	userspace API entry point, even though most of those operations are
-	implemented in the DRM core. The <methodname>open</methodname>,
-	<methodname>release</methodname> and <methodname>ioctl</methodname>
-	operations are handled by
-	<programlisting>
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-  #ifdef CONFIG_COMPAT
-	.compat_ioctl = drm_compat_ioctl,
-  #endif
-        </programlisting>
-      </para>
-      <para>
-        Drivers that implement private ioctls that requires 32/64bit
-	compatibility support must provide their own
-	<methodname>compat_ioctl</methodname> handler that processes private
-	ioctls and calls <function>drm_compat_ioctl</function> for core ioctls.
-      </para>
-      <para>
-        The <methodname>read</methodname> and <methodname>poll</methodname>
-	operations provide support for reading DRM events and polling them. They
-	are implemented by
-	<programlisting>
-	.poll = drm_poll,
-	.read = drm_read,
-	.llseek = no_llseek,
-	</programlisting>
-      </para>
-      <para>
-        The memory mapping implementation varies depending on how the driver
-	manages memory. Pre-GEM drivers will use <function>drm_mmap</function>,
-	while GEM-aware drivers will use <function>drm_gem_mmap</function>. See
-	<xref linkend="drm-gem"/>.
-	<programlisting>
-	.mmap = drm_gem_mmap,
-	</programlisting>
-      </para>
-      <para>
-        No other file operation is supported by the DRM API.
-      </para>
+!Pdrivers/gpu/drm/drm_fops.c file operations
+!Edrivers/gpu/drm/drm_fops.c
     </sect2>
     <sect2>
       <title>IOCTLs</title>
@@ -3319,6 +3330,12 @@
 !Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
 !Idrivers/gpu/drm/i915/intel_csr.c
       </sect2>
+      <sect2>
+	<title>Video BIOS Table (VBT)</title>
+!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
+!Idrivers/gpu/drm/i915/intel_bios.c
+!Idrivers/gpu/drm/i915/intel_bios.h
+      </sect2>
     </sect1>
 
     <sect1>
@@ -3460,6 +3477,7 @@
     </sect1>
     <sect1>
       <title>Public constants</title>
+!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler_flags_t
 !Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
 !Finclude/linux/vga_switcheroo.h vga_switcheroo_state
     </sect1>
@@ -3488,6 +3506,10 @@
         <title>Backlight control</title>
 !Pdrivers/platform/x86/apple-gmux.c Backlight control
       </sect2>
+      <sect2>
+        <title>Public functions</title>
+!Iinclude/linux/apple-gmux.h
+      </sect2>
     </sect1>
   </chapter>
 
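As a quick illustration of the color management properties documented in the
hunk above, here is a minimal userspace sketch. It is an assumption-laden
example, not part of this series: it presumes libdrm, a CRTC id obtained by
the caller, a lut_size previously read from the GAMMA_LUT_SIZE property, and
hypothetical helper names; real compositors would typically latch the blob
through an atomic commit rather than the legacy object-property ioctl.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Look up a CRTC property id by name (hypothetical helper). */
static uint32_t find_crtc_prop(int fd, uint32_t crtc_id, const char *name)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
	uint32_t id = 0;
	uint32_t i;

	for (i = 0; props && i < props->count_props; i++) {
		drmModePropertyRes *p = drmModeGetProperty(fd, props->props[i]);

		if (p && !strcmp(p->name, name))
			id = p->prop_id;
		drmModeFreeProperty(p);
	}
	drmModeFreeObjectProperties(props);
	return id;
}

/* Program an identity (linear) gamma ramp through the GAMMA_LUT blob. */
static int set_linear_gamma(int fd, uint32_t crtc_id, uint32_t lut_size)
{
	struct drm_color_lut lut[lut_size];
	uint32_t blob_id, prop_id, i;

	for (i = 0; i < lut_size; i++) {
		uint16_t v = (uint32_t)i * 0xffff / (lut_size - 1);

		lut[i].red = lut[i].green = lut[i].blue = v;
		lut[i].reserved = 0;
	}

	if (drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id))
		return -1;

	prop_id = find_crtc_prop(fd, crtc_id, "GAMMA_LUT");
	return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
					prop_id, blob_id);
}
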
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
index 56a961a..9f97df4 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+++ b/Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
@@ -35,6 +35,12 @@
 		  as an interrupt/status bit in the HDMI controller
 		  itself).  See bindings/pinctrl/brcm,bcm2835-gpio.txt
 
+Required properties for V3D:
+- compatible:	Should be "brcm,bcm2835-v3d"
+- reg:		Physical base address and length of the V3D's registers
+- interrupts:	The interrupt number
+		  See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+
 Example:
 pixelvalve@7e807000 {
 	compatible = "brcm,bcm2835-pixelvalve2";
@@ -60,6 +66,12 @@
 	clock-names = "pixel", "hdmi";
 };
 
+v3d: v3d@7ec00000 {
+	compatible = "brcm,bcm2835-v3d";
+	reg = <0x7ec00000 0x1000>;
+	interrupts = <1 10>;
+};
+
 vc4: gpu {
 	compatible = "brcm,bcm2835-vc4";
 };
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
index 0e6f0c0..22756b3 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
@@ -6,6 +6,7 @@
 		"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
 		"samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
 		"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
+		"samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
 		"samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
   - reg: physical base address and length of the registers set for the device
   - interrupts: should contain DSI interrupt
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 27c3ce0..c7c6b9a 100644
--- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
@@ -12,7 +12,8 @@
 		"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
 		"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
 		"samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
-		"samsung,exynos5250-fimd"; /* for Exynos5 SoCs */
+		"samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
+		"samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
 
 - reg: physical base address and length of the FIMD registers set.
 
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index e7423be..f5948c4 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -44,9 +44,34 @@
 - pinctrl-names: the pin control state names; should contain "default"
 - pinctrl-0: the default pinctrl state (active)
 - pinctrl-n: the "sleep" pinctrl state
-- port: DSI controller output port. This contains one endpoint subnode, with its
-  remote-endpoint set to the phandle of the connected panel's endpoint.
-  See Documentation/devicetree/bindings/graph.txt for device graph info.
+- port: DSI controller output port, containing one endpoint subnode.
+
+  DSI Endpoint properties:
+  - remote-endpoint: set to phandle of the connected panel's endpoint.
+    See Documentation/devicetree/bindings/graph.txt for device graph info.
+  - qcom,data-lane-map: this describes how the logical DSI lanes are mapped
+    to the physical lanes on the given platform. The value contained in
+    index n describes what logical data lane is mapped to the physical data
+    lane n (DATAn, where n lies between 0 and 3).
+
+    For example:
+
+    qcom,data-lane-map = <3 0 1 2>;
+
+    The above mapping describes that the logical data lane DATA3 is mapped to
+    the physical data lane DATA0, logical DATA0 to physical DATA1, logical
+    DATA1 to physical DATA2 and logical DATA2 to physical DATA3.
+
+    There are only a limited number of physical to logical mappings possible:
+
+    "0123": Logic 0->Phys 0; Logic 1->Phys 1; Logic 2->Phys 2; Logic 3->Phys 3;
+    "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
+    "2301": Logic 2->Phys 0; Logic 3->Phys 1; Logic 0->Phys 2; Logic 1->Phys 3;
+    "1230": Logic 1->Phys 0; Logic 2->Phys 1; Logic 3->Phys 2; Logic 0->Phys 3;
+    "0321": Logic 0->Phys 0; Logic 3->Phys 1; Logic 2->Phys 2; Logic 1->Phys 3;
+    "1032": Logic 1->Phys 0; Logic 0->Phys 1; Logic 3->Phys 2; Logic 2->Phys 3;
+    "2103": Logic 2->Phys 0; Logic 1->Phys 1; Logic 0->Phys 2; Logic 3->Phys 3;
+    "3210": Logic 3->Phys 0; Logic 2->Phys 1; Logic 1->Phys 2; Logic 0->Phys 3;
 
 DSI PHY:
 Required properties:
@@ -131,6 +156,7 @@
 		port {
 			dsi0_out: endpoint {
 				remote-endpoint = <&panel_in>;
+				qcom,data-lane-map = <0 1 2 3>;
 			};
 		};
 	};
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt
index 379ee2e..b63f614 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt
@@ -11,6 +11,7 @@
 - reg: Physical base address and length of the controller's registers
 - reg-names: "core_physical"
 - interrupts: The interrupt signal from the hdmi block.
+- power-domains: Should be <&mmcc MDSS_GDSC>.
 - clocks: device clocks
   See ../clocks/clock-bindings.txt for details.
 - qcom,hdmi-tx-ddc-clk-gpio: ddc clk pin
@@ -18,6 +19,8 @@
 - qcom,hdmi-tx-hpd-gpio: hpd pin
 - core-vdda-supply: phandle to supply regulator
 - hdmi-mux-supply: phandle to mux regulator
+- phys: the phandle for the HDMI PHY device
+- phy-names: the name of the corresponding PHY device
 
 Optional properties:
 - qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
@@ -27,15 +30,38 @@
 - pinctrl-0: the default pinctrl state (active)
 - pinctrl-1: the "sleep" pinctrl state
 
+HDMI PHY:
+Required properties:
+- compatible: Could be one of the following
+  * "qcom,hdmi-phy-8660"
+  * "qcom,hdmi-phy-8960"
+  * "qcom,hdmi-phy-8974"
+  * "qcom,hdmi-phy-8084"
+  * "qcom,hdmi-phy-8996"
+- #phy-cells: Number of cells in a PHY specifier; Should be 0.
+- reg: Physical base address and length of the registers of the PHY sub blocks.
+- reg-names: The names of register regions. The following regions are required:
+  * "hdmi_phy"
+  * "hdmi_pll"
+  For HDMI PHY on msm8996, these additional register regions are required:
+    * "hdmi_tx_l0"
+    * "hdmi_tx_l1"
+    * "hdmi_tx_l3"
+    * "hdmi_tx_l4"
+- power-domains: Should be <&mmcc MDSS_GDSC>.
+- clocks: device clocks
+  See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
+- core-vdda-supply: phandle to vdda regulator device node
+
 Example:
 
 / {
 	...
 
-	hdmi: qcom,hdmi-tx-8960@4a00000 {
+	hdmi: hdmi@4a00000 {
 		compatible = "qcom,hdmi-tx-8960";
 		reg-names = "core_physical";
-		reg = <0x04a00000 0x1000>;
+		reg = <0x04a00000 0x2f0>;
 		interrupts = <GIC_SPI 79 0>;
 		power-domains = <&mmcc MDSS_GDSC>;
 		clock-names =
@@ -54,5 +80,21 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&hpd_active  &ddc_active  &cec_active>;
 		pinctrl-1 = <&hpd_suspend &ddc_suspend &cec_suspend>;
+
+		phys = <&hdmi_phy>;
+		phy-names = "hdmi_phy";
+	};
+
+	hdmi_phy: phy@4a00400 {
+		compatible = "qcom,hdmi-phy-8960";
+		reg-names = "hdmi_phy",
+			    "hdmi_pll";
+		reg = <0x4a00400 0x60>,
+		      <0x4a00500 0x100>;
+		#phy-cells = <0>;
+		power-domains = <&mmcc MDSS_GDSC>;
+		clock-names = "slave_iface_clk";
+		clocks = <&mmcc HDMI_S_AHB_CLK>;
+		core-vdda-supply = <&pm8921_hdmi_mvs>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt
new file mode 100644
index 0000000..8c5de69
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt
@@ -0,0 +1,7 @@
+LG 12.0" (1920x1280 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp120up1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt b/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt
new file mode 100644
index 0000000..088a6ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt
@@ -0,0 +1,16 @@
+United Radiant Technology UMSH-8596MD-xT 7.0" WVGA TFT LCD panel
+
+Both the LVDS versions (-11T, -19T) and the parallel ones
+(-T, -1T, -7T, -20T) are supported.
+
+Required properties:
+- compatible: should be one of:
+  "urt,umsh-8596md-t",
+  "urt,umsh-8596md-1t",
+  "urt,umsh-8596md-7t",
+  "urt,umsh-8596md-11t",
+  "urt,umsh-8596md-19t",
+  "urt,umsh-8596md-20t".
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index eccd4f4..0d30e42 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -8,6 +8,7 @@
     - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
     - "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU
     - "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU
+    - "renesas,du-r8a7795" for R8A7795 (R-Car H3) compatible DU
 
   - reg: A list of base address and length of each memory resource, one for
     each entry in the reg-names property.
@@ -24,7 +25,7 @@
   - clock-names: Name of the clocks. This property is model-dependent.
     - R8A7779 uses a single functional clock. The clock doesn't need to be
       named.
-    - R8A779[0134] use one functional clock per channel and one clock per LVDS
+    - R8A779[01345] use one functional clock per channel and one clock per LVDS
       encoder (if available). The functional clocks must be named "du.x" with
       "x" being the channel numerical index. The LVDS clocks must be named
       "lvds.x" with "x" being the LVDS encoder numerical index.
@@ -41,13 +42,14 @@
 The following table lists for each supported model the port number
 corresponding to each DU output.
 
-		Port 0		Port1		Port2
+		Port 0		Port1		Port2		Port3
 -----------------------------------------------------------------------------
- R8A7779 (H1)	DPAD 0		DPAD 1		-
- R8A7790 (H2)	DPAD		LVDS 0		LVDS 1
- R8A7791 (M2-W)	DPAD		LVDS 0		-
- R8A7793 (M2-N)	DPAD		LVDS 0		-
- R8A7794 (E2)	DPAD 0		DPAD 1		-
+ R8A7779 (H1)	DPAD 0		DPAD 1		-		-
+ R8A7790 (H2)	DPAD		LVDS 0		LVDS 1		-
+ R8A7791 (M2-W)	DPAD		LVDS 0		-		-
+ R8A7793 (M2-N)	DPAD		LVDS 0		-		-
+ R8A7794 (E2)	DPAD 0		DPAD 1		-		-
+ R8A7795 (H3)	DPAD		HDMI 0		HDMI 1		LVDS
 
 
 Example: R8A7790 (R-Car H2) DU
diff --git a/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt
new file mode 100644
index 0000000..8096a29
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt
@@ -0,0 +1,50 @@
+Rockchip specific extensions to the Innosilicon HDMI
+====================================================
+
+Required properties:
+- compatible:
+	"rockchip,rk3036-inno-hdmi";
+- reg:
+	Physical base address and length of the controller's registers.
+- clocks, clock-names:
+	Phandle to hdmi controller clock, name should be "pclk"
+- interrupts:
+	HDMI interrupt number
+- ports:
+	Contains one port node with endpoint definitions as defined in
+	Documentation/devicetree/bindings/graph.txt.
+- pinctrl-0, pinctrl-names:
+	Switch the iomux of HPD/CEC pins to HDMI function.
+
+Example:
+hdmi: hdmi@20034000 {
+	compatible = "rockchip,rk3036-inno-hdmi";
+	reg = <0x20034000 0x4000>;
+	interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&cru  PCLK_HDMI>;
+	clock-names = "pclk";
+	pinctrl-names = "default";
+	pinctrl-0 = <&hdmi_ctl>;
+	status = "disabled";
+
+	hdmi_in: port {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		hdmi_in_lcdc: endpoint@0 {
+			reg = <0>;
+			remote-endpoint = <&lcdc_out_hdmi>;
+		};
+	};
+};
+
+&pinctrl {
+	hdmi {
+		hdmi_ctl: hdmi-ctl {
+			rockchip,pins = <1 8  RK_FUNC_1 &pcfg_pull_none>,
+					<1 9  RK_FUNC_1 &pcfg_pull_none>,
+					<1 10 RK_FUNC_1 &pcfg_pull_none>,
+					<1 11 RK_FUNC_1 &pcfg_pull_none>;
+		};
+	};
+
+};
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 480c8de..32ac32e 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -257,17 +257,15 @@
 
    Interface:
       int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-				   size_t start, size_t len,
 				   enum dma_data_direction direction)
 
    This allows the exporter to ensure that the memory is actually available for
    cpu access - the exporter might need to allocate or swap-in and pin the
    backing storage. The exporter also needs to ensure that cpu access is
-   coherent for the given range and access direction. The range and access
-   direction can be used by the exporter to optimize the cache flushing, i.e.
-   access outside of the range or with a different direction (read instead of
-   write) might return stale or even bogus data (e.g. when the exporter needs to
-   copy the data to temporary storage).
+   coherent for the access direction. The direction can be used by the exporter
+   to optimize the cache flushing, i.e. access with a different direction (read
+   instead of write) might return stale or even bogus data (e.g. when the
+   exporter needs to copy the data to temporary storage).
 
    This step might fail, e.g. in oom conditions.
 
@@ -322,14 +320,13 @@
 
 3. Finish access
 
-   When the importer is done accessing the range specified in begin_cpu_access,
-   it needs to announce this to the exporter (to facilitate cache flushing and
-   unpinning of any pinned resources). The result of any dma_buf kmap calls
-   after end_cpu_access is undefined.
+   When the importer is done accessing the CPU, it needs to announce this to
+   the exporter (to facilitate cache flushing and unpinning of any pinned
+   resources). The result of any dma_buf kmap calls after end_cpu_access is
+   undefined.
 
    Interface:
       void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
-				  size_t start, size_t len,
 				  enum dma_data_direction dir);
 
 
@@ -353,7 +350,26 @@
    handles, too). So it's beneficial to support this in a similar fashion on
    dma-buf to have a good transition path for existing Android userspace.
 
-   No special interfaces, userspace simply calls mmap on the dma-buf fd.
+   No special interfaces; userspace simply calls mmap on the dma-buf fd, making
+   sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always*
+   used when the access happens. This is discussed in the next paragraphs.
+
+   Some systems might need some sort of cache coherency management, e.g. when
+   CPU and GPU domains are being accessed through dma-buf at the same time. To
+   circumvent this problem there are begin/end coherency markers that forward
+   directly to the existing dma-buf device drivers' vfunc hooks. Userspace can
+   make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
+   sequence would be used as follows:
+     - mmap the dma-buf fd
+     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl,
+       2. read/write to the mmap area, 3. SYNC_END ioctl. Repeat as often as
+       needed (with the new data being consumed by the GPU or scanout device)
+     - munmap once you don't need the buffer any more
+
+    Therefore, for correctness and optimal performance, systems whose memory
+    cache is shared by the GPU and CPU (i.e. the "coherent" ones) as well as
+    the "incoherent" ones are always required to use SYNC_START and SYNC_END
+    before and after, respectively, accessing the mapped address.
 
 2. Supporting existing mmap interfaces in importers
 
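To make the SYNC_START/SYNC_END sequence above concrete, a minimal userspace
sketch might look as follows. It assumes a dma-buf fd and its size were
obtained elsewhere (e.g. via PRIME export); the function name is hypothetical.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

/* One CPU write cycle bracketed by the coherency markers. */
int cpu_fill(int dmabuf_fd, size_t size, uint8_t value)
{
	struct dma_buf_sync sync = { .flags = 0 };
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, dmabuf_fd, 0);

	if (map == MAP_FAILED)
		return -1;

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
		goto fail;

	memset(map, value, size);	/* the actual CPU access */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
		goto fail;

	munmap(map, size);
	return 0;
fail:
	munmap(map, size);
	return -1;
}
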
diff --git a/MAINTAINERS b/MAINTAINERS
index da636ea..29d9779 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -847,6 +847,12 @@
 F:	drivers/net/arcnet/
 F:	include/uapi/linux/if_arcnet.h
 
+ARM HDLCD DRM DRIVER
+M:	Liviu Dudau <liviu.dudau@arm.com>
+S:	Supported
+F:	drivers/gpu/drm/arm/
+F:	Documentation/devicetree/bindings/display/arm,hdlcd.txt
+
 ARM MFM AND FLOPPY DRIVERS
 M:	Ian Molton <spyro@f2s.com>
 S:	Maintained
@@ -3754,7 +3760,7 @@
 F:	include/drm/
 F:	include/uapi/drm/
 
-RADEON DRM DRIVERS
+RADEON and AMDGPU DRM DRIVERS
 M:	Alex Deucher <alexander.deucher@amd.com>
 M:	Christian König <christian.koenig@amd.com>
 L:	dri-devel@lists.freedesktop.org
@@ -3762,6 +3768,8 @@
 S:	Supported
 F:	drivers/gpu/drm/radeon/
 F:	include/uapi/drm/radeon*
+F:	drivers/gpu/drm/amd/
+F:	include/uapi/drm/amdgpu*
 
 DRM PANEL DRIVERS
 M:	Thierry Reding <thierry.reding@gmail.com>
@@ -3806,7 +3814,7 @@
 F:	include/uapi/drm/exynos*
 
 DRM DRIVERS FOR FREESCALE DCU
-M:	Jianwei Wang <jianwei.wang.chn@gmail.com>
+M:	Stefan Agner <stefan@agner.ch>
 M:	Alison Wang <alison.wang@freescale.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 1341a94..aef87fd 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -555,8 +555,10 @@
 static void intel_gtt_teardown_scratch_page(void)
 {
 	set_pages_wb(intel_private.scratch_page, 1);
-	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (intel_private.needs_dmar)
+		pci_unmap_page(intel_private.pcidev,
+			       intel_private.scratch_page_dma,
+			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	__free_page(intel_private.scratch_page);
 }
 
@@ -1346,16 +1348,6 @@
 {
 	int i, mask;
 
-	/*
-	 * Can be called from the fake agp driver but also directly from
-	 * drm/i915.ko. Hence we need to check whether everything is set up
-	 * already.
-	 */
-	if (intel_private.driver) {
-		intel_private.refcount++;
-		return 1;
-	}
-
 	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
 		if (gpu_pdev) {
 			if (gpu_pdev->device ==
@@ -1376,16 +1368,26 @@
 	if (!intel_private.driver)
 		return 0;
 
-	intel_private.refcount++;
-
 #if IS_ENABLED(CONFIG_AGP_INTEL)
 	if (bridge) {
+		if (INTEL_GTT_GEN > 1)
+			return 0;
+
 		bridge->driver = &intel_fake_agp_driver;
 		bridge->dev_private_data = &intel_private;
 		bridge->dev = bridge_pdev;
 	}
 #endif
 
+
+	/*
+	 * Can be called from the fake agp driver but also directly from
+	 * drm/i915.ko. Hence we need to check whether everything is set up
+	 * already.
+	 */
+	if (intel_private.refcount++)
+		return 1;
+
 	intel_private.bridge_dev = pci_dev_get(bridge_pdev);
 
 	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
@@ -1430,6 +1432,8 @@
 	if (--intel_private.refcount)
 		return;
 
+	if (intel_private.scratch_page)
+		intel_gtt_teardown_scratch_page();
 	if (intel_private.pcidev)
 		pci_dev_put(intel_private.pcidev);
 	if (intel_private.bridge_dev)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 155c146..9810d1d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -34,6 +34,8 @@
 #include <linux/poll.h>
 #include <linux/reservation.h>
 
+#include <uapi/linux/dma-buf.h>
+
 static inline int is_dma_buf_file(struct file *);
 
 struct dma_buf_list {
@@ -251,11 +253,54 @@
 	return events;
 }
 
+static long dma_buf_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	struct dma_buf *dmabuf;
+	struct dma_buf_sync sync;
+	enum dma_data_direction direction;
+
+	dmabuf = file->private_data;
+
+	switch (cmd) {
+	case DMA_BUF_IOCTL_SYNC:
+		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
+			return -EFAULT;
+
+		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
+			return -EINVAL;
+
+		switch (sync.flags & DMA_BUF_SYNC_RW) {
+		case DMA_BUF_SYNC_READ:
+			direction = DMA_FROM_DEVICE;
+			break;
+		case DMA_BUF_SYNC_WRITE:
+			direction = DMA_TO_DEVICE;
+			break;
+		case DMA_BUF_SYNC_RW:
+			direction = DMA_BIDIRECTIONAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (sync.flags & DMA_BUF_SYNC_END)
+			dma_buf_end_cpu_access(dmabuf, direction);
+		else
+			dma_buf_begin_cpu_access(dmabuf, direction);
+
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
 	.mmap		= dma_buf_mmap_internal,
 	.llseek		= dma_buf_llseek,
 	.poll		= dma_buf_poll,
+	.unlocked_ioctl	= dma_buf_ioctl,
 };
 
 /*
@@ -539,13 +584,11 @@
  * preparations. Coherency is only guaranteed in the specified range for the
  * specified access direction.
  * @dmabuf:	[in]	buffer to prepare cpu access for.
- * @start:	[in]	start of range for cpu access.
- * @len:	[in]	length of range for cpu access.
  * @direction:	[in]	length of range for cpu access.
  *
  * Can return negative error values, returns 0 on success.
  */
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 			     enum dma_data_direction direction)
 {
 	int ret = 0;
@@ -554,8 +597,7 @@
 		return -EINVAL;
 
 	if (dmabuf->ops->begin_cpu_access)
-		ret = dmabuf->ops->begin_cpu_access(dmabuf, start,
-							len, direction);
+		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
 	return ret;
 }
@@ -567,19 +609,17 @@
  * actions. Coherency is only guaranteed in the specified range for the
  * specified access direction.
  * @dmabuf:	[in]	buffer to complete cpu access for.
- * @start:	[in]	start of range for cpu access.
- * @len:	[in]	length of range for cpu access.
  * @direction:	[in]	length of range for cpu access.
  *
  * This call must always succeed.
  */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 			    enum dma_data_direction direction)
 {
 	WARN_ON(!dmabuf);
 
 	if (dmabuf->ops->end_cpu_access)
-		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+		dmabuf->ops->end_cpu_access(dmabuf, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 8ae7ab6..f2a74d0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -25,6 +25,14 @@
 	bool
 	depends on DRM
 
+config DRM_DP_AUX_CHARDEV
+	bool "DRM DP AUX Interface"
+	depends on DRM
+	help
+	  Choose this option to enable a /dev/drm_dp_auxN node that allows
+	  reading and writing values in arbitrary DPCD registers on the DP
+	  AUX channel.
+
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
@@ -106,6 +114,8 @@
 	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
 	  graphics card.  If M is selected, the module will be called tdfx.
 
+source "drivers/gpu/drm/arm/Kconfig"
+
 config DRM_R128
 	tristate "ATI Rage 128"
 	depends on DRM && PCI
@@ -162,6 +172,8 @@
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
 source "drivers/gpu/drm/amd/powerplay/Kconfig"
 
+source "drivers/gpu/drm/amd/acp/Kconfig"
+
 source "drivers/gpu/drm/nouveau/Kconfig"
 
 config DRM_I810
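For reference, the chardev enabled by DRM_DP_AUX_CHARDEV above treats the file
offset as the DPCD address, so native DPCD registers can be read with plain
pread()/pwrite(). A minimal sketch follows; the device path and aux index are
system-dependent assumptions.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t rev;
	int fd = open("/dev/drm_dp_aux0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Offset 0x000 is DP_DPCD_REV, the DPCD revision register. */
	if (pread(fd, &rev, 1, 0x000) == 1)
		printf("DPCD rev %d.%d\n", rev >> 4, rev & 0xf);
	close(fd);
	return 0;
}
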
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 61766de..6eb94fc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -22,10 +22,13 @@
 drm-$(CONFIG_AGP) += drm_agpsupport.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
-		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
+		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
+		drm_kms_helper_common.o
+
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
+drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
@@ -33,6 +36,7 @@
 
 obj-$(CONFIG_DRM)	+= drm.o
 obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+obj-$(CONFIG_DRM_ARM)	+= arm/
 obj-$(CONFIG_DRM_TTM)	+= ttm/
 obj-$(CONFIG_DRM_TDFX)	+= tdfx/
 obj-$(CONFIG_DRM_R128)	+= r128/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
new file mode 100644
index 0000000..0f734ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -0,0 +1,10 @@
+menu "ACP Configuration"
+
+config DRM_AMD_ACP
+       bool "Enable ACP IP support"
+       select MFD_CORE
+       select PM_GENERIC_DOMAINS if PM
+       help
+	Choose this option to enable ACP IP support for AMD SoCs.
+
+endmenu
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
new file mode 100644
index 0000000..8363cb5
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the ACP, which is a sub-component
+# of the AMDSOC/AMDGPU drm driver.
+# It provides the HW control for ACP-related functionality.
+
+subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
+
+AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c b/drivers/gpu/drm/amd/acp/acp_hw.c
similarity index 64%
copy from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
copy to drivers/gpu/drm/amd/acp/acp_hw.c
index b3839dc2..7af83f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
+++ b/drivers/gpu/drm/amd/acp/acp_hw.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,22 +19,32 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "priv.h"
 
-static const struct nvkm_subdev_func
-gm204_ibus = {
-	.intr = gk104_ibus_intr,
-};
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
 
-int
-gm204_ibus_new(struct nvkm_device *device, int index,
-	       struct nvkm_subdev **pibus)
+#include "acp_gfx_if.h"
+
+#define ACP_MODE_I2S	0
+#define ACP_MODE_AZ	1
+
+#define mmACP_AZALIA_I2S_SELECT 0x51d4
+
+int amd_acp_hw_init(void *cgs_device,
+		    unsigned acp_version_major, unsigned acp_version_minor)
 {
-	struct nvkm_subdev *ibus;
-	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
-		return -ENOMEM;
-	nvkm_subdev_ctor(&gm204_ibus, device, index, 0, ibus);
+	unsigned int acp_mode = ACP_MODE_I2S;
+
+	if ((acp_version_major == 2) && (acp_version_minor == 2))
+		acp_mode = cgs_read_register(cgs_device,
+					mmACP_AZALIA_I2S_SELECT);
+
+	if (acp_mode != ACP_MODE_I2S)
+		return -ENODEV;
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
similarity index 77%
copy from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
copy to drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
index 6511d6e..bccf47b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
+++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,16 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ben Skeggs
- */
-#include "changk104.h"
+*/
 
-#include <nvif/class.h>
+#ifndef _ACP_GFX_IF_H
+#define _ACP_GFX_IF_H
 
-const struct nvkm_fifo_chan_oclass
-gm204_fifo_gpfifo_oclass = {
-	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = gk104_fifo_gpfifo_new,
-};
+#include <linux/types.h>
+#include "cgs_linux.h"
+#include "cgs_common.h"
+
+int amd_acp_hw_init(void *cgs_device,
+		    unsigned acp_version_major, unsigned acp_version_minor);
+
+#endif /* _ACP_GFX_IF_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 20c9539..c7fcdce 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -8,7 +8,8 @@
 	-I$(FULL_AMD_PATH)/include \
 	-I$(FULL_AMD_PATH)/amdgpu \
 	-I$(FULL_AMD_PATH)/scheduler \
-	-I$(FULL_AMD_PATH)/powerplay/inc
+	-I$(FULL_AMD_PATH)/powerplay/inc \
+	-I$(FULL_AMD_PATH)/acp/include
 
 amdgpu-y := amdgpu_drv.o
 
@@ -20,7 +21,7 @@
 	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
 	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
 	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
-	atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \
+	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
 
@@ -92,7 +93,17 @@
 amdgpu-y += \
 	../scheduler/gpu_scheduler.o \
 	../scheduler/sched_fence.o \
-	amdgpu_sched.o
+	amdgpu_job.o
+
+# ACP component
+ifneq ($(CONFIG_DRM_AMD_ACP),)
+amdgpu-y += amdgpu_acp.o
+
+AMDACPPATH := ../acp
+include $(FULL_AMD_PATH)/acp/Makefile
+
+amdgpu-y += $(AMD_ACP_FILES)
+endif
 
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5e7770f..c4a21c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -53,6 +53,7 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
 #include "amd_powerplay.h"
+#include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
 
@@ -74,7 +75,6 @@
 extern int amdgpu_smc_load_fw;
 extern int amdgpu_aspm;
 extern int amdgpu_runtime_pm;
-extern int amdgpu_hard_reset;
 extern unsigned amdgpu_ip_block_mask;
 extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
@@ -82,10 +82,8 @@
 extern int amdgpu_vm_block_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
-extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
-extern int amdgpu_enable_semaphores;
 extern int amdgpu_powerplay;
 extern unsigned amdgpu_pcie_gen_cap;
 extern unsigned amdgpu_pcie_lane_cap;
@@ -108,9 +106,6 @@
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES		2
 
-/* number of hw syncs before falling back on blocking */
-#define AMDGPU_NUM_SYNCS			4
-
 /* hardcode that limit for now */
 #define AMDGPU_VA_RESERVED_SIZE			(8 << 20)
 
@@ -146,11 +141,9 @@
 #define CIK_CURSOR_HEIGHT 128
 
 struct amdgpu_device;
-struct amdgpu_fence;
 struct amdgpu_ib;
 struct amdgpu_vm;
 struct amdgpu_ring;
-struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
@@ -248,7 +241,7 @@
 			 unsigned count);
 	/* write pte one entry at a time with addr mapping */
 	void (*write_pte)(struct amdgpu_ib *ib,
-			  uint64_t pe,
+			  const dma_addr_t *pages_addr, uint64_t pe,
 			  uint64_t addr, unsigned count,
 			  uint32_t incr, uint32_t flags);
 	/* for linear pte/pde updates without addr mapping */
@@ -256,8 +249,6 @@
 			    uint64_t pe,
 			    uint64_t addr, unsigned count,
 			    uint32_t incr, uint32_t flags);
-	/* pad the indirect buffer to the necessary number of dw */
-	void (*pad_ib)(struct amdgpu_ib *ib);
 };
 
 /* provided by the gmc block */
@@ -295,12 +286,11 @@
 			struct amdgpu_ib *ib);
 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
 			   uint64_t seq, unsigned flags);
-	bool (*emit_semaphore)(struct amdgpu_ring *ring,
-			       struct amdgpu_semaphore *semaphore,
-			       bool emit_wait);
+	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
 	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
 			      uint64_t pd_addr);
 	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
 	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
 				uint32_t gds_base, uint32_t gds_size,
 				uint32_t gws_base, uint32_t gws_size,
@@ -310,6 +300,8 @@
 	int (*test_ib)(struct amdgpu_ring *ring);
 	/* insert NOP packets */
 	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+	/* pad the indirect buffer to the necessary number of dw */
+	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 };
 
 /*
@@ -355,13 +347,15 @@
 	uint64_t			gpu_addr;
 	volatile uint32_t		*cpu_addr;
 	/* sync_seq is protected by ring emission lock */
-	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
-	atomic64_t			last_seq;
+	uint32_t			sync_seq;
+	atomic_t			last_seq;
 	bool				initialized;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
 	struct timer_list		fallback_timer;
-	wait_queue_head_t		fence_queue;
+	unsigned			num_fences_mask;
+	spinlock_t			lock;
+	struct fence			**fences;
 };
 
 /* some special values for the owner field */
@@ -371,19 +365,6 @@
 #define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 
-struct amdgpu_fence {
-	struct fence base;
-
-	/* RB, DMA, etc. */
-	struct amdgpu_ring		*ring;
-	uint64_t			seq;
-
-	/* filp or special value for fence creator */
-	void				*owner;
-
-	wait_queue_t			fence_wake;
-};
-
 struct amdgpu_user_fence {
 	/* write-back bo */
 	struct amdgpu_bo 	*bo;
@@ -395,24 +376,18 @@
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
 
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+				  unsigned num_hw_submission);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
-		      struct amdgpu_fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
-			    struct amdgpu_ring *ring);
-void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
-			    struct amdgpu_ring *ring);
-
 /*
  * TTM.
  */
@@ -431,6 +406,8 @@
 	/* buffer handling */
 	const struct amdgpu_buffer_funcs	*buffer_funcs;
 	struct amdgpu_ring			*buffer_funcs_ring;
+	/* Scheduler entity for buffer moves */
+	struct amd_sched_entity			entity;
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -445,9 +422,9 @@
 	struct amdgpu_bo		*robj;
 	struct ttm_validate_buffer	tv;
 	struct amdgpu_bo_va		*bo_va;
-	unsigned			prefered_domains;
-	unsigned			allowed_domains;
 	uint32_t			priority;
+	struct page			**user_pages;
+	int				user_invalidated;
 };
 
 struct amdgpu_bo_va_mapping {
@@ -459,7 +436,6 @@
 
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
-	struct mutex		        mutex;
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
 	struct fence		        *last_pt_update;
@@ -483,7 +459,8 @@
 	/* Protected by gem.mutex */
 	struct list_head		list;
 	/* Protected by tbo.reserved */
-	u32				initial_domain;
+	u32				prefered_domains;
+	u32				allowed_domains;
 	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
@@ -505,7 +482,6 @@
 	struct amdgpu_bo		*parent;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
-	pid_t				pid;
 	struct amdgpu_mn		*mn;
 	struct list_head		mn_list;
 };
@@ -554,11 +530,14 @@
  * Assumption is that there won't be hole (all object on same
  * alignment).
  */
+
+#define AMDGPU_SA_NUM_FENCE_LISTS	32
+
 struct amdgpu_sa_manager {
 	wait_queue_head_t	wq;
 	struct amdgpu_bo	*bo;
 	struct list_head	*hole;
-	struct list_head	flist[AMDGPU_MAX_RINGS];
+	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
 	struct list_head	olist;
 	unsigned		size;
 	uint64_t		gpu_addr;
@@ -580,13 +559,7 @@
 /*
  * GEM objects.
  */
-struct amdgpu_gem {
-	struct mutex		mutex;
-	struct list_head	objects;
-};
-
-int amdgpu_gem_init(struct amdgpu_device *adev);
-void amdgpu_gem_fini(struct amdgpu_device *adev);
+void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 				int alignment, u32 initial_domain,
 				u64 flags, bool kernel,
@@ -598,32 +571,10 @@
 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 			  struct drm_device *dev,
 			  uint32_t handle, uint64_t *offset_p);
-
-/*
- * Semaphores.
- */
-struct amdgpu_semaphore {
-	struct amdgpu_sa_bo	*sa_bo;
-	signed			waiters;
-	uint64_t		gpu_addr;
-};
-
-int amdgpu_semaphore_create(struct amdgpu_device *adev,
-			    struct amdgpu_semaphore **semaphore);
-bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
-				  struct amdgpu_semaphore *semaphore);
-bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
-				struct amdgpu_semaphore *semaphore);
-void amdgpu_semaphore_free(struct amdgpu_device *adev,
-			   struct amdgpu_semaphore **semaphore,
-			   struct fence *fence);
-
 /*
  * Synchronization
  */
 struct amdgpu_sync {
-	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
-	struct fence		*sync_to[AMDGPU_MAX_RINGS];
 	DECLARE_HASHTABLE(fences, 4);
 	struct fence	        *last_vm_update;
 };
@@ -635,12 +586,11 @@
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
-int amdgpu_sync_rings(struct amdgpu_sync *sync,
-		      struct amdgpu_ring *ring);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_wait(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *fence);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
 
 /*
  * GART structures, functions & helpers
@@ -758,6 +708,7 @@
 	struct fence			*excl;
 	unsigned			shared_count;
 	struct fence			**shared;
+	struct fence_cb			cb;
 };
 
 
@@ -770,12 +721,11 @@
 	uint32_t			length_dw;
 	uint64_t			gpu_addr;
 	uint32_t			*ptr;
-	struct amdgpu_ring		*ring;
-	struct amdgpu_fence		*fence;
 	struct amdgpu_user_fence        *user;
 	struct amdgpu_vm		*vm;
+	unsigned			vm_id;
+	uint64_t			vm_pd_addr;
 	struct amdgpu_ctx		*ctx;
-	struct amdgpu_sync		sync;
 	uint32_t			gds_base, gds_size;
 	uint32_t			gws_base, gws_size;
 	uint32_t			oa_base, oa_size;
@@ -794,13 +744,14 @@
 
 extern struct amd_sched_backend_ops amdgpu_sched_ops;
 
-int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
-					 struct amdgpu_ring *ring,
-					 struct amdgpu_ib *ibs,
-					 unsigned num_ibs,
-					 int (*free_job)(struct amdgpu_job *),
-					 void *owner,
-					 struct fence **fence);
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+		     struct amdgpu_job **job);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+			     struct amdgpu_job **job);
+void amdgpu_job_free(struct amdgpu_job *job);
+int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+		      struct amd_sched_entity *entity, void *owner,
+		      struct fence **f);
 
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
@@ -809,7 +760,6 @@
 	struct amd_gpu_scheduler 	sched;
 
 	spinlock_t              fence_lock;
-	struct mutex		*ring_lock;
 	struct amdgpu_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr_offs;
@@ -818,7 +768,7 @@
 	unsigned		wptr;
 	unsigned		wptr_old;
 	unsigned		ring_size;
-	unsigned		ring_free_dw;
+	unsigned		max_dw;
 	int			count_dw;
 	uint64_t		gpu_addr;
 	uint32_t		align_mask;
@@ -826,8 +776,6 @@
 	bool			ready;
 	u32			nop;
 	u32			idx;
-	u64			last_semaphore_signal_addr;
-	u64			last_semaphore_wait_addr;
 	u32			me;
 	u32			pipe;
 	u32			queue;
@@ -840,7 +788,6 @@
 	struct amdgpu_ctx	*current_ctx;
 	enum amdgpu_ring_type	type;
 	char			name[16];
-	bool                    is_pte_ring;
 };
 
 /*
@@ -884,13 +831,14 @@
 };
 
 struct amdgpu_vm_id {
-	unsigned		id;
-	uint64_t		pd_gpu_addr;
+	struct amdgpu_vm_manager_id	*mgr_id;
+	uint64_t			pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct fence	        *flushed_updates;
+	struct fence			*flushed_updates;
 };
 
 struct amdgpu_vm {
+	/* tree of virtual addresses mapped */
 	struct rb_root		va;
 
 	/* protecting invalidated */
@@ -915,30 +863,47 @@
 
 	/* for id and flush management per ring */
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
-	/* for interval tree */
-	spinlock_t		it_lock;
+
 	/* protecting freed */
 	spinlock_t		freed_lock;
+
+	/* Scheduler entity for page table updates */
+	struct amd_sched_entity	entity;
+};
+
+struct amdgpu_vm_manager_id {
+	struct list_head	list;
+	struct fence		*active;
+	atomic_long_t		owner;
+
+	uint32_t		gds_base;
+	uint32_t		gds_size;
+	uint32_t		gws_base;
+	uint32_t		gws_size;
+	uint32_t		oa_base;
+	uint32_t		oa_size;
 };
 
 struct amdgpu_vm_manager {
-	struct {
-		struct fence	*active;
-		atomic_long_t	owner;
-	} ids[AMDGPU_NUM_VM];
+	/* Handling of VMIDs */
+	struct mutex				lock;
+	unsigned				num_ids;
+	struct list_head			ids_lru;
+	struct amdgpu_vm_manager_id		ids[AMDGPU_NUM_VM];
 
 	uint32_t				max_pfn;
-	/* number of VMIDs */
-	unsigned				nvm;
 	/* vram base address for page table entry  */
 	u64					vram_base_offset;
 	/* is vm enabled? */
 	bool					enabled;
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
-	struct amdgpu_ring                      *vm_pte_funcs_ring;
+	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
+	unsigned				vm_pte_num_rings;
+	atomic_t				vm_pte_next_ring;
 };
 
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
@@ -949,14 +914,15 @@
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+		     unsigned vm_id, uint64_t pd_addr,
+		     uint32_t gds_base, uint32_t gds_size,
+		     uint32_t gws_base, uint32_t gws_size,
+		     uint32_t oa_base, uint32_t oa_size);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -982,7 +948,6 @@
 		       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
 
 /*
  * context related structures
@@ -1010,10 +975,6 @@
 	struct idr		ctx_handles;
 };
 
-int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
-		    struct amdgpu_ctx *ctx);
-void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
-
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
@@ -1048,13 +1009,15 @@
 	struct amdgpu_bo *gds_obj;
 	struct amdgpu_bo *gws_obj;
 	struct amdgpu_bo *oa_obj;
-	bool has_userptr;
+	unsigned first_userptr;
 	unsigned num_entries;
 	struct amdgpu_bo_list_entry *array;
 };
 
 struct amdgpu_bo_list *
 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+			     struct list_head *validated);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
 
@@ -1128,6 +1091,7 @@
 	unsigned multi_gpu_tile_size;
 	unsigned mc_arb_ramcfg;
 	unsigned gb_addr_config;
+	unsigned num_rbs;
 
 	uint32_t tile_mode_array[32];
 	uint32_t macrotile_mode_array[16];
@@ -1170,23 +1134,20 @@
 	unsigned ce_ram_size;
 };
 
-int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
+int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
-int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
-		       struct amdgpu_ib *ib, void *owner);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+		       struct amdgpu_ib *ib, struct fence *last_vm_update,
+		       struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-/* Ring access between begin & end cannot sleep */
-void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
 			    uint32_t **data);
 int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -1196,7 +1157,6 @@
 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
 		     enum amdgpu_ring_type ring_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
-struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
 
 /*
  * CS.
@@ -1205,47 +1165,58 @@
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
 	uint32_t		*kdata;
-	void __user		*user_ptr;
 };
 
 struct amdgpu_cs_parser {
 	struct amdgpu_device	*adev;
 	struct drm_file		*filp;
 	struct amdgpu_ctx	*ctx;
-	struct amdgpu_bo_list *bo_list;
+
 	/* chunks */
 	unsigned		nchunks;
 	struct amdgpu_cs_chunk	*chunks;
-	/* relocations */
+
+	/* scheduler job object */
+	struct amdgpu_job	*job;
+
+	/* buffer objects */
+	struct ww_acquire_ctx		ticket;
+	struct amdgpu_bo_list		*bo_list;
 	struct amdgpu_bo_list_entry	vm_pd;
-	struct list_head	validated;
-	struct fence		*fence;
-
-	struct amdgpu_ib	*ibs;
-	uint32_t		num_ibs;
-
-	struct ww_acquire_ctx	ticket;
+	struct list_head		validated;
+	struct fence			*fence;
+	uint64_t			bytes_moved_threshold;
+	uint64_t			bytes_moved;
 
 	/* user fence */
-	struct amdgpu_user_fence	uf;
 	struct amdgpu_bo_list_entry	uf_entry;
 };
 
 struct amdgpu_job {
 	struct amd_sched_job    base;
 	struct amdgpu_device	*adev;
+	struct amdgpu_ring	*ring;
+	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
+	struct fence		*fence; /* the hw fence */
 	uint32_t		num_ibs;
 	void			*owner;
 	struct amdgpu_user_fence uf;
-	int (*free_job)(struct amdgpu_job *job);
 };
 #define to_amdgpu_job(sched_job)		\
 		container_of((sched_job), struct amdgpu_job, base)
 
-static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
+static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
+				      uint32_t ib_idx, int idx)
 {
-	return p->ibs[ib_idx].ptr[idx];
+	return p->job->ibs[ib_idx].ptr[idx];
+}
+
+static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
+				       uint32_t ib_idx, int idx,
+				       uint32_t value)
+{
+	p->job->ibs[ib_idx].ptr[idx] = value;
 }
 
 /*
@@ -1497,6 +1468,7 @@
 	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
 	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
 	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
 };
 
 struct amdgpu_vce_state {
@@ -1626,6 +1598,7 @@
 	struct amdgpu_ring	ring;
 	struct amdgpu_irq_src	irq;
 	bool			address_64_bit;
+	struct amd_sched_entity entity;
 };
 
 /*
@@ -1650,6 +1623,7 @@
 	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
 	struct amdgpu_irq_src	irq;
 	unsigned		harvest_config;
+	struct amd_sched_entity	entity;
 };
 
 /*
@@ -1884,6 +1858,18 @@
 
 
 /*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
+
+
+/* GPU virtualization */
+struct amdgpu_virtualization {
+	bool supports_sr_iov;
+};
+
+/*
  * Core structure, functions and helpers.
  */
 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
@@ -1903,6 +1889,10 @@
 	struct drm_device		*ddev;
 	struct pci_dev			*pdev;
 
+#ifdef CONFIG_DRM_AMD_ACP
+	struct amdgpu_acp		acp;
+#endif
+
 	/* ASIC */
 	enum amd_asic_type		asic_type;
 	uint32_t			family;
@@ -1979,7 +1969,6 @@
 
 	/* memory management */
 	struct amdgpu_mman		mman;
-	struct amdgpu_gem		gem;
 	struct amdgpu_vram_scratch	vram_scratch;
 	struct amdgpu_wb		wb;
 	atomic64_t			vram_usage;
@@ -1997,7 +1986,6 @@
 
 	/* rings */
 	unsigned			fence_context;
-	struct mutex			ring_lock;
 	unsigned			num_rings;
 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
 	bool				ib_pool_ready;
@@ -2009,6 +1997,7 @@
 	/* powerplay */
 	struct amd_powerplay		powerplay;
 	bool				pp_enabled;
+	bool				pp_force_state_enabled;
 
 	/* dpm */
 	struct amdgpu_pm		pm;
@@ -2025,7 +2014,6 @@
 	struct amdgpu_sdma		sdma;
 
 	/* uvd */
-	bool				has_uvd;
 	struct amdgpu_uvd		uvd;
 
 	/* vce */
@@ -2050,8 +2038,7 @@
 	/* amdkfd interface */
 	struct kfd_dev          *kfd;
 
-	/* kernel conext for IB submission */
-	struct amdgpu_ctx	kernel_ctx;
+	struct amdgpu_virtualization virtualization;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2073,20 +2060,6 @@
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 
 /*
- * Cast helper
- */
-extern const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
-{
-	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
-
-	if (__f->base.ops == &amdgpu_fence_ops)
-		return __f;
-
-	return NULL;
-}
-
-/*
  * Registers read & write functions.
  */
 #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
@@ -2156,7 +2129,6 @@
 	ring->ring[ring->wptr++] = v;
 	ring->wptr &= ring->ptr_mask;
 	ring->count_dw--;
-	ring->ring_free_dw--;
 }
 
 static inline struct amdgpu_sdma_instance *
@@ -2192,9 +2164,8 @@
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
@@ -2202,11 +2173,13 @@
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
 #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
-#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
+#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2298,6 +2271,21 @@
 #define amdgpu_dpm_get_performance_level(adev) \
 	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
 
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+		(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output)		\
 	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
 
@@ -2308,7 +2296,6 @@
 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
-bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2316,11 +2303,15 @@
 		       struct amdgpu_ring **out_ring);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 				     uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 				  unsigned long end);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+				       int *last_invalidated);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
new file mode 100644
index 0000000..d6b0bff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -0,0 +1,500 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/pm_domain.h>
+#include <linux/platform_device.h>
+#include <sound/designware_i2s.h>
+#include <sound/pcm.h>
+
+#include "amdgpu.h"
+#include "atom.h"
+#include "amdgpu_acp.h"
+
+#include "acp_gfx_if.h"
+
+#define ACP_TILE_ON_MASK                0x03
+#define ACP_TILE_OFF_MASK               0x02
+#define ACP_TILE_ON_RETAIN_REG_MASK     0x1f
+#define ACP_TILE_OFF_RETAIN_REG_MASK    0x20
+
+#define ACP_TILE_P1_MASK                0x3e
+#define ACP_TILE_P2_MASK                0x3d
+#define ACP_TILE_DSP0_MASK              0x3b
+#define ACP_TILE_DSP1_MASK              0x37
+
+#define ACP_TILE_DSP2_MASK              0x2f
+
+#define ACP_DMA_REGS_END		0x146c0
+#define ACP_I2S_PLAY_REGS_START		0x14840
+#define ACP_I2S_PLAY_REGS_END		0x148b4
+#define ACP_I2S_CAP_REGS_START		0x148b8
+#define ACP_I2S_CAP_REGS_END		0x1496c
+
+#define ACP_I2S_COMP1_CAP_REG_OFFSET	0xac
+#define ACP_I2S_COMP2_CAP_REG_OFFSET	0xa8
+#define ACP_I2S_COMP1_PLAY_REG_OFFSET	0x6c
+#define ACP_I2S_COMP2_PLAY_REG_OFFSET	0x68
+
+#define mmACP_PGFSM_RETAIN_REG		0x51c9
+#define mmACP_PGFSM_CONFIG_REG		0x51ca
+#define mmACP_PGFSM_READ_REG_0		0x51cc
+
+#define mmACP_MEM_SHUT_DOWN_REQ_LO	0x51f8
+#define mmACP_MEM_SHUT_DOWN_REQ_HI	0x51f9
+#define mmACP_MEM_SHUT_DOWN_STS_LO	0x51fa
+#define mmACP_MEM_SHUT_DOWN_STS_HI	0x51fb
+
+#define ACP_TIMEOUT_LOOP		0x000000FF
+#define ACP_DEVS			3
+#define ACP_SRC_ID			162
+
+enum {
+	ACP_TILE_P1 = 0,
+	ACP_TILE_P2,
+	ACP_TILE_DSP0,
+	ACP_TILE_DSP1,
+	ACP_TILE_DSP2,
+};
+
+static int acp_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->acp.parent = adev->dev;
+
+	adev->acp.cgs_device =
+		amdgpu_cgs_create_device(adev);
+	if (!adev->acp.cgs_device)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int acp_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->acp.cgs_device)
+		amdgpu_cgs_destroy_device(adev->acp.cgs_device);
+
+	return 0;
+}
+
+/* power off a tile/block within ACP */
+static int acp_suspend_tile(void *cgs_dev, int tile)
+{
+	u32 val = 0;
+	u32 count = 0;
+
+	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+		pr_err("Invalid ACP tile %d to suspend\n", tile);
+		return -1;
+	}
+
+	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
+	val &= ACP_TILE_ON_MASK;
+
+	if (val == 0x0) {
+		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+		val = val | (1 << tile);
+		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
+					0x500 + tile);
+
+		count = ACP_TIMEOUT_LOOP;
+		while (true) {
+			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
+								+ tile);
+			val = val & ACP_TILE_ON_MASK;
+			if (val == ACP_TILE_OFF_MASK)
+				break;
+			if (--count == 0) {
+				pr_err("Timeout reading ACP PGFSM status\n");
+				return -ETIMEDOUT;
+			}
+			udelay(100);
+		}
+
+		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+
+		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
+		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+	}
+	return 0;
+}
+
+/* power on a tile/block within ACP */
+static int acp_resume_tile(void *cgs_dev, int tile)
+{
+	u32 val = 0;
+	u32 count = 0;
+
+	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+		pr_err("Invalid ACP tile %d to resume\n", tile);
+		return -1;
+	}
+
+	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
+	val = val & ACP_TILE_ON_MASK;
+
+	if (val != 0x0) {
+		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
+					0x600 + tile);
+		count = ACP_TIMEOUT_LOOP;
+		while (true) {
+			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
+							+ tile);
+			val = val & ACP_TILE_ON_MASK;
+			if (val == 0x0)
+				break;
+			if (--count == 0) {
+				pr_err("Timeout reading ACP PGFSM status\n");
+				return -ETIMEDOUT;
+			}
+			udelay(100);
+		}
+		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+		if (tile == ACP_TILE_P1)
+			val = val & (ACP_TILE_P1_MASK);
+		else if (tile == ACP_TILE_P2)
+			val = val & (ACP_TILE_P2_MASK);
+
+		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+	}
+	return 0;
+}
+
+struct acp_pm_domain {
+	void *cgs_dev;
+	struct generic_pm_domain gpd;
+};
+
+static int acp_poweroff(struct generic_pm_domain *genpd)
+{
+	int i, ret;
+	struct acp_pm_domain *apd;
+
+	apd = container_of(genpd, struct acp_pm_domain, gpd);
+	if (apd != NULL) {
+		/* Do not return abruptly if any of the power tiles fails to
+		 * suspend. Log it and continue powering off the other tiles.
+		 */
+		for (i = 4; i >= 0; i--) {
+			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
+			if (ret)
+				pr_err("ACP tile %d suspend failed\n", i);
+		}
+	}
+	return 0;
+}
+
+static int acp_poweron(struct generic_pm_domain *genpd)
+{
+	int i, ret;
+	struct acp_pm_domain *apd;
+
+	apd = container_of(genpd, struct acp_pm_domain, gpd);
+	if (apd != NULL) {
+		for (i = 0; i < 2; i++) {
+			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
+			if (ret) {
+				pr_err("ACP tile %d resume failed\n", i);
+				break;
+			}
+		}
+
+		/* Disable DSPs which are not going to be used */
+		for (i = 0; i < 3; i++) {
+			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
+			/* Continue suspending other DSP, even if one fails */
+			if (ret)
+				pr_err("ACP DSP %d suspend failed\n", i);
+		}
+	}
+	return 0;
+}
+
+static struct device *get_mfd_cell_dev(const char *device_name, int r)
+{
+	char auto_dev_name[25];
+	struct device *dev;
+
+	snprintf(auto_dev_name, sizeof(auto_dev_name),
+		 "%s.%d.auto", device_name, r);
+	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
+	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
+
+	return dev;
+}
+
+/**
+ * acp_hw_init - start and test ACP block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static int acp_hw_init(void *handle)
+{
+	int r, i;
+	uint64_t acp_base;
+	struct device *dev;
+	struct i2s_platform_data *i2s_pdata;
+
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	const struct amdgpu_ip_block_version *ip_version =
+		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
+
+	if (!ip_version)
+		return -EINVAL;
+
+	r = amd_acp_hw_init(adev->acp.cgs_device,
+			    ip_version->major, ip_version->minor);
+	/* -ENODEV means board uses AZ rather than ACP */
+	if (r == -ENODEV)
+		return 0;
+	else if (r)
+		return r;
+
+	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
+			0x5289, 0, &acp_base);
+	if (r == -ENODEV)
+		return 0;
+	else if (r)
+		return r;
+
+	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
+	if (adev->acp.acp_genpd == NULL)
+		return -ENOMEM;
+
+	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
+	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
+	adev->acp.acp_genpd->gpd.power_on = acp_poweron;
+
+	adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
+
+	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+
+	adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
+							GFP_KERNEL);
+
+	if (adev->acp.acp_cell == NULL)
+		return -ENOMEM;
+
+	adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);
+
+	if (adev->acp.acp_res == NULL) {
+		kfree(adev->acp.acp_cell);
+		return -ENOMEM;
+	}
+
+	i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
+	if (i2s_pdata == NULL) {
+		kfree(adev->acp.acp_res);
+		kfree(adev->acp.acp_cell);
+		return -ENOMEM;
+	}
+
+	i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+	i2s_pdata[0].cap = DWC_I2S_PLAY;
+	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
+	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
+	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
+
+	i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+				DW_I2S_QUIRK_COMP_PARAM1;
+	i2s_pdata[1].cap = DWC_I2S_RECORD;
+	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
+	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
+	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
+
+	adev->acp.acp_res[0].name = "acp2x_dma";
+	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
+	adev->acp.acp_res[0].start = acp_base;
+	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
+
+	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
+	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
+	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
+	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
+
+	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
+	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
+	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
+	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
+
+	adev->acp.acp_res[3].name = "acp2x_dma_irq";
+	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
+	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
+	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+
+	adev->acp.acp_cell[0].name = "acp_audio_dma";
+	adev->acp.acp_cell[0].num_resources = 4;
+	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
+
+	adev->acp.acp_cell[1].name = "designware-i2s";
+	adev->acp.acp_cell[1].num_resources = 1;
+	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
+	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
+	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
+
+	adev->acp.acp_cell[2].name = "designware-i2s";
+	adev->acp.acp_cell[2].num_resources = 1;
+	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
+	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
+	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
+
+	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
+								ACP_DEVS);
+	if (r)
+		return r;
+
+	for (i = 0; i < ACP_DEVS; i++) {
+		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
+		if (r) {
+			dev_err(dev, "Failed to add dev to genpd\n");
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * acp_hw_fini - stop the hardware block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static int acp_hw_fini(void *handle)
+{
+	int i, ret;
+	struct device *dev;
+
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < ACP_DEVS; i++) {
+		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+		ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
+		/* If removal fails, don't give up and try the rest */
+		if (ret)
+			dev_err(dev, "remove dev from genpd failed\n");
+	}
+
+	mfd_remove_devices(adev->acp.parent);
+	kfree(adev->acp.acp_res);
+	kfree(adev->acp.acp_genpd);
+	kfree(adev->acp.acp_cell);
+
+	return 0;
+}
+
+static int acp_suspend(void *handle)
+{
+	return 0;
+}
+
+static int acp_resume(void *handle)
+{
+	int i, ret;
+	struct acp_pm_domain *apd;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* The SMU block powers on ACP irrespective of the ACP runtime status.
+	 * Power it off explicitly based on the genpd ACP runtime status so
+	 * that the ACP hardware and the ACP genpd state stay in sync.
+	 * 'suspend_power_off' is the power status before system suspend.
+	 */
+	if (adev->acp.acp_genpd->gpd.suspend_power_off) {
+		apd = container_of(&adev->acp.acp_genpd->gpd,
+					struct acp_pm_domain, gpd);
+
+		for (i = 4; i >= 0; i--) {
+			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
+			if (ret)
+				pr_err("ACP tile %d suspend failed\n", i);
+		}
+	}
+	return 0;
+}
+
+static int acp_early_init(void *handle)
+{
+	return 0;
+}
+
+static bool acp_is_idle(void *handle)
+{
+	return true;
+}
+
+static int acp_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static int acp_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static void acp_print_status(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	dev_info(adev->dev, "ACP STATUS\n");
+}
+
+static int acp_set_clockgating_state(void *handle,
+				     enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int acp_set_powergating_state(void *handle,
+				     enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs acp_ip_funcs = {
+	.early_init = acp_early_init,
+	.late_init = NULL,
+	.sw_init = acp_sw_init,
+	.sw_fini = acp_sw_fini,
+	.hw_init = acp_hw_init,
+	.hw_fini = acp_hw_fini,
+	.suspend = acp_suspend,
+	.resume = acp_resume,
+	.is_idle = acp_is_idle,
+	.wait_for_idle = acp_wait_for_idle,
+	.soft_reset = acp_soft_reset,
+	.print_status = acp_print_status,
+	.set_clockgating_state = acp_set_clockgating_state,
+	.set_powergating_state = acp_set_powergating_state,
+};
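
acp_suspend_tile() and acp_resume_tile() above share the same PGFSM handshake: write a power-gate command, then poll the status register until the tile reports the requested state or a bounded number of retries expires. A minimal standalone sketch of that poll-with-timeout pattern, using a hypothetical register accessor rather than the cgs interface:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /*
     * Poll read_reg(ctx, offset) until (value & mask) == expected, giving
     * up after 'tries' reads with a 100us delay between them. Hypothetical
     * helper; the patch open-codes this loop in both tile functions.
     */
    static int poll_reg_masked(u32 (*read_reg)(void *ctx, u32 offset),
                               void *ctx, u32 offset, u32 mask,
                               u32 expected, unsigned int tries)
    {
            while (tries--) {
                    if ((read_reg(ctx, offset) & mask) == expected)
                            return 0;
                    udelay(100);
            }
            return -ETIMEDOUT;
    }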
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
similarity index 72%
copy from drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
copy to drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 222f4a8..f6e32a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,20 +19,24 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
+ * Authors: AMD
+ *
  */
-#include "dmacnv50.h"
-#include "rootnv50.h"
 
-#include <nvif/class.h>
+#ifndef __AMDGPU_ACP_H__
+#define __AMDGPU_ACP_H__
 
-const struct nv50_disp_dmac_oclass
-gm204_disp_core_oclass = {
-	.base.oclass = GM204_DISP_CORE_CHANNEL_DMA,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = nv50_disp_core_new,
-	.func = &gf119_disp_core_func,
-	.mthd = &gk104_disp_core_chan_mthd,
-	.chid = 0,
+#include <linux/mfd/core.h>
+
+struct amdgpu_acp {
+	struct device *parent;
+	void *cgs_device;
+	struct amd_acp_private *private;
+	struct mfd_cell *acp_cell;
+	struct resource *acp_res;
+	struct acp_pm_domain *acp_genpd;
 };
+
+extern const struct amd_ip_funcs acp_ip_funcs;
+
+#endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 84d68d6..32809f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -30,25 +30,38 @@
 const struct kgd2kfd_calls *kgd2kfd;
 bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
 
-bool amdgpu_amdkfd_init(void)
+int amdgpu_amdkfd_init(void)
 {
+	int ret;
+
 #if defined(CONFIG_HSA_AMD_MODULE)
-	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+	int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
 
 	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
 
 	if (kgd2kfd_init_p == NULL)
-		return false;
+		return -ENOENT;
+
+	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
+	if (ret) {
+		symbol_put(kgd2kfd_init);
+		kgd2kfd = NULL;
+	}
+
+#elif defined(CONFIG_HSA_AMD)
+	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
+	if (ret)
+		kgd2kfd = NULL;
+
+#else
+	ret = -ENOENT;
 #endif
-	return true;
+
+	return ret;
 }
 
 bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
 {
-#if defined(CONFIG_HSA_AMD_MODULE)
-	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
-#endif
-
 	switch (rdev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_KAVERI:
@@ -62,35 +75,7 @@
 		return false;
 	}
 
-#if defined(CONFIG_HSA_AMD_MODULE)
-	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
-
-	if (kgd2kfd_init_p == NULL) {
-		kfd2kgd = NULL;
-		return false;
-	}
-
-	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
-		symbol_put(kgd2kfd_init);
-		kfd2kgd = NULL;
-		kgd2kfd = NULL;
-
-		return false;
-	}
-
 	return true;
-#elif defined(CONFIG_HSA_AMD)
-	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
-		kfd2kgd = NULL;
-		kgd2kfd = NULL;
-		return false;
-	}
-
-	return true;
-#else
-	kfd2kgd = NULL;
-	return false;
-#endif
 }
 
 void amdgpu_amdkfd_fini(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a8be765..de530f68d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -36,7 +36,7 @@
 	void *cpu_ptr;
 };
 
-bool amdgpu_amdkfd_init(void);
+int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
 bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
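
With amdgpu_amdkfd_init() returning an errno instead of a bool, a caller can distinguish "kfd simply not built or loaded" from a real interface failure. A hypothetical call-site sketch (the error policy shown is an assumption, not taken from this patch):

    int r = amdgpu_amdkfd_init();

    /*
     * -ENOENT means the kfd module/interface is absent, which a driver
     * may treat as "no HSA support" rather than a fatal error; any other
     * code is a genuine failure propagated from kgd2kfd_init().
     */
    if (r && r != -ENOENT)
            pr_err("amdgpu: kfd init failed (%d)\n", r);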
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9416e0f..84b0ce3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1514,6 +1514,19 @@
 	return -EINVAL;
 }
 
+bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
+{
+	int index = GetIndexIntoMasterTable(DATA, GPUVirtualizationInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+					  &frev, &crev, &data_offset))
+		return true;
+
+	return false;
+}
+
 void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
 {
 	uint32_t bios_6_scratch;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 0ebb959..9e14420 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -196,6 +196,8 @@
 				      u8 module_index,
 				      struct atom_mc_reg_table *reg_table);
 
+bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
+
 void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
 void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
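
amdgpu_atombios_has_gpu_virtualization_table() only asks whether the VBIOS carries a GPUVirtualizationInfo data table. A plausible consumer, sketched here as an assumption since the actual call site is not in the hunks above, would be the new virtualization flag:

    /* Hypothetical probe-time wiring for the new SR-IOV capability bit. */
    adev->virtualization.supports_sr_iov =
            amdgpu_atombios_has_gpu_virtualization_table(adev);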
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3c89586..0020a0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,6 +63,10 @@
 	return amdgpu_atpx_priv.atpx_detected;
 }
 
+bool amdgpu_has_atpx_dgpu_power_cntl(void)
+{
+	return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -142,10 +146,6 @@
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
-	/* make sure required functions are enabled */
-	/* dGPU power control is required */
-	atpx->functions.power_cntl = true;
-
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
@@ -552,13 +552,14 @@
 void amdgpu_register_atpx_handler(void)
 {
 	bool r;
+	enum vga_switcheroo_handler_flags_t handler_flags = 0;
 
 	/* detect if we have any ATPX + 2 VGA in the system */
 	r = amdgpu_atpx_detect();
 	if (!r)
 		return;
 
-	vga_switcheroo_register_handler(&amdgpu_atpx_handler);
+	vga_switcheroo_register_handler(&amdgpu_atpx_handler, handler_flags);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f82a2dd..eacd810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -32,6 +32,9 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
+#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)
+
 static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
 				 struct amdgpu_bo_list **result,
 				 int *id)
@@ -88,8 +91,9 @@
 	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
 	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
 
-	bool has_userptr = false;
+	unsigned last_entry = 0, first_userptr = num_entries;
 	unsigned i;
+	int r;
 
 	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
 	if (!array)
@@ -97,33 +101,43 @@
 	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
 
 	for (i = 0; i < num_entries; ++i) {
-		struct amdgpu_bo_list_entry *entry = &array[i];
+		struct amdgpu_bo_list_entry *entry;
 		struct drm_gem_object *gobj;
+		struct amdgpu_bo *bo;
+		struct mm_struct *usermm;
 
 		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
-		if (!gobj)
+		if (!gobj) {
+			r = -ENOENT;
 			goto error_free;
-
-		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-		drm_gem_object_unreference_unlocked(gobj);
-		entry->priority = info[i].bo_priority;
-		entry->prefered_domains = entry->robj->initial_domain;
-		entry->allowed_domains = entry->prefered_domains;
-		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
-			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
-		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
-			has_userptr = true;
-			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
-			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 		}
+
+		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+		drm_gem_object_unreference_unlocked(gobj);
+
+		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
+		if (usermm) {
+			if (usermm != current->mm) {
+				amdgpu_bo_unref(&bo);
+				r = -EPERM;
+				goto error_free;
+			}
+			entry = &array[--first_userptr];
+		} else {
+			entry = &array[last_entry++];
+		}
+
+		entry->robj = bo;
+		entry->priority = min(info[i].bo_priority,
+				      AMDGPU_BO_LIST_MAX_PRIORITY);
 		entry->tv.bo = &entry->robj->tbo;
 		entry->tv.shared = true;
 
-		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
+		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
 			gds_obj = entry->robj;
-		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
+		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
 			gws_obj = entry->robj;
-		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
+		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
 			oa_obj = entry->robj;
 
 		trace_amdgpu_bo_list_set(list, entry->robj);
@@ -137,15 +151,17 @@
 	list->gds_obj = gds_obj;
 	list->gws_obj = gws_obj;
 	list->oa_obj = oa_obj;
-	list->has_userptr = has_userptr;
+	list->first_userptr = first_userptr;
 	list->array = array;
 	list->num_entries = num_entries;
 
 	return 0;
 
 error_free:
+	while (i--)
+		amdgpu_bo_unref(&array[i].robj);
 	drm_free_large(array);
-	return -ENOENT;
+	return r;
 }
 
 struct amdgpu_bo_list *
@@ -161,6 +177,37 @@
 	return result;
 }
 
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+			     struct list_head *validated)
+{
+	/* This is based on bucket sort with O(n) time complexity.
+	 * An item with priority "i" is added to bucket[i]. The lists are then
+	 * concatenated in descending order.
+	 */
+	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+	unsigned i;
+
+	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
+		INIT_LIST_HEAD(&bucket[i]);
+
+	/* Since buffers which appear sooner in the relocation list are
+	 * likely to be used more often than buffers which appear later
+	 * in the list, the sort mustn't change the ordering of buffers
+	 * with the same priority, i.e. it must be stable.
+	 */
+	for (i = 0; i < list->num_entries; i++) {
+		unsigned priority = list->array[i].priority;
+
+		list_add_tail(&list->array[i].tv.head,
+			      &bucket[priority]);
+		list->array[i].user_pages = NULL;
+	}
+
+	/* Connect the sorted buckets in the output list. */
+	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
+		list_splice(&bucket[i], validated);
+}
+
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
 {
 	mutex_unlock(&list->lock);
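
amdgpu_bo_list_get_list() above is a stable bucket sort over the clamped 0..32 priority range: each entry is appended to its bucket, and splicing the buckets at the head of the output in ascending index order leaves the final list in descending priority. The same idea in a self-contained sketch (hypothetical item type, not driver code):

    #include <linux/list.h>

    #define NUM_BUCKETS 33                  /* priorities 0..32 */

    struct item {
            unsigned int priority;          /* pre-clamped to 0..32 */
            struct list_head head;
    };

    /*
     * Stable O(n) sort: list_add_tail() preserves the relative order of
     * equal-priority items, and list_splice() prepends each bucket, so
     * visiting buckets 0..32 yields the output in descending priority.
     */
    static void bucket_sort(struct item *items, unsigned int n,
                            struct list_head *out)
    {
            struct list_head bucket[NUM_BUCKETS];
            unsigned int i;

            for (i = 0; i < NUM_BUCKETS; i++)
                    INIT_LIST_HEAD(&bucket[i]);

            for (i = 0; i < n; i++)
                    list_add_tail(&items[i].head,
                                  &bucket[items[i].priority]);

            for (i = 0; i < NUM_BUCKETS; i++)
                    list_splice(&bucket[i], out);
    }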
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b882e81..9392e50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -25,52 +25,12 @@
  *    Jerome Glisse <glisse@freedesktop.org>
  */
 #include <linux/list_sort.h>
+#include <linux/pagemap.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-#define AMDGPU_CS_MAX_PRIORITY		32u
-#define AMDGPU_CS_NUM_BUCKETS		(AMDGPU_CS_MAX_PRIORITY + 1)
-
-/* This is based on the bucket sort with O(n) time complexity.
- * An item with priority "i" is added to bucket[i]. The lists are then
- * concatenated in descending order.
- */
-struct amdgpu_cs_buckets {
-	struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
-};
-
-static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
-{
-	unsigned i;
-
-	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
-		INIT_LIST_HEAD(&b->bucket[i]);
-}
-
-static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
-				  struct list_head *item, unsigned priority)
-{
-	/* Since buffers which appear sooner in the relocation list are
-	 * likely to be used more often than buffers which appear later
-	 * in the list, the sort mustn't change the ordering of buffers
-	 * with the same priority, i.e. it must be stable.
-	 */
-	list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
-}
-
-static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
-				       struct list_head *out_list)
-{
-	unsigned i;
-
-	/* Connect the sorted buckets in the output list. */
-	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
-		list_splice(&b->bucket[i], out_list);
-	}
-}
-
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
 		       struct amdgpu_ring **out_ring)
@@ -128,6 +88,7 @@
 }
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+				      struct amdgpu_user_fence *uf,
 				      struct drm_amdgpu_cs_chunk_fence *fence_data)
 {
 	struct drm_gem_object *gobj;
@@ -139,20 +100,19 @@
 	if (gobj == NULL)
 		return -EINVAL;
 
-	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-	p->uf.offset = fence_data->offset;
+	uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+	uf->offset = fence_data->offset;
 
-	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+	if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
 		drm_gem_object_unreference_unlocked(gobj);
 		return -EINVAL;
 	}
 
-	p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
-	p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
-	p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
 	p->uf_entry.priority = 0;
 	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 	p->uf_entry.tv.shared = true;
+	p->uf_entry.user_pages = NULL;
 
 	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
@@ -160,11 +120,12 @@
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
-	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	unsigned size;
+	struct amdgpu_user_fence uf = {};
+	unsigned size, num_ibs = 0;
 	int i;
 	int ret;
 
@@ -181,15 +142,12 @@
 		goto free_chunk;
 	}
 
-	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
-
 	/* get chunks */
-	INIT_LIST_HEAD(&p->validated);
 	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		ret = -EFAULT;
-		goto put_bo_list;
+		goto put_ctx;
 	}
 
 	p->nchunks = cs->in.num_chunks;
@@ -197,7 +155,7 @@
 			    GFP_KERNEL);
 	if (!p->chunks) {
 		ret = -ENOMEM;
-		goto put_bo_list;
+		goto put_ctx;
 	}
 
 	for (i = 0; i < p->nchunks; i++) {
@@ -217,7 +175,6 @@
 
 		size = p->chunks[i].length_dw;
 		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
-		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 		if (p->chunks[i].kdata == NULL) {
@@ -233,7 +190,7 @@
 
 		switch (p->chunks[i].chunk_id) {
 		case AMDGPU_CHUNK_ID_IB:
-			p->num_ibs++;
+			++num_ibs;
 			break;
 
 		case AMDGPU_CHUNK_ID_FENCE:
@@ -243,7 +200,7 @@
 				goto free_partial_kdata;
 			}
 
-			ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+			ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
 			if (ret)
 				goto free_partial_kdata;
 
@@ -258,12 +215,11 @@
 		}
 	}
 
-
-	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs) {
-		ret = -ENOMEM;
+	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
+	if (ret)
 		goto free_all_kdata;
-	}
+
+	p->job->uf = uf;
 
 	kfree(chunk_array);
 	return 0;
@@ -274,9 +230,7 @@
 	for (; i >= 0; i--)
 		drm_free_large(p->chunks[i].kdata);
 	kfree(p->chunks);
-put_bo_list:
-	if (p->bo_list)
-		amdgpu_bo_list_put(p->bo_list);
+put_ctx:
 	amdgpu_ctx_put(p->ctx);
 free_chunk:
 	kfree(chunk_array);
@@ -336,96 +290,198 @@
 	return max(bytes_moved_threshold, 1024*1024ull);
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_device *adev,
-			    struct amdgpu_vm *vm,
+int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			    struct list_head *validated)
 {
 	struct amdgpu_bo_list_entry *lobj;
-	struct amdgpu_bo *bo;
-	u64 bytes_moved = 0, initial_bytes_moved;
-	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
+	u64 initial_bytes_moved;
 	int r;
 
 	list_for_each_entry(lobj, validated, tv.head) {
-		bo = lobj->robj;
-		if (!bo->pin_count) {
-			u32 domain = lobj->prefered_domains;
-			u32 current_domain =
-				amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+		struct amdgpu_bo *bo = lobj->robj;
+		bool binding_userptr = false;
+		struct mm_struct *usermm;
+		uint32_t domain;
 
-			/* Check if this buffer will be moved and don't move it
-			 * if we have moved too many buffers for this IB already.
-			 *
-			 * Note that this allows moving at least one buffer of
-			 * any size, because it doesn't take the current "bo"
-			 * into account. We don't want to disallow buffer moves
-			 * completely.
-			 */
-			if ((lobj->allowed_domains & current_domain) != 0 &&
-			    (domain & current_domain) == 0 && /* will be moved */
-			    bytes_moved > bytes_moved_threshold) {
-				/* don't move it */
-				domain = current_domain;
-			}
+		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
+		if (usermm && usermm != current->mm)
+			return -EPERM;
 
-		retry:
-			amdgpu_ttm_placement_from_domain(bo, domain);
-			initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-			bytes_moved += atomic64_read(&adev->num_bytes_moved) -
-				       initial_bytes_moved;
+		/* Check if we have user pages and the BO is not bound yet */
+		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
+			size_t size = sizeof(struct page *);
 
-			if (unlikely(r)) {
-				if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
-					domain = lobj->allowed_domains;
-					goto retry;
-				}
-				return r;
-			}
+			size *= bo->tbo.ttm->num_pages;
+			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+			binding_userptr = true;
 		}
-		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
+
+		if (bo->pin_count)
+			continue;
+
+		/* Avoid moving this one if we have moved too many buffers
+		 * for this IB already.
+		 *
+		 * Note that this allows moving at least one buffer of
+		 * any size, because it doesn't take the current "bo"
+		 * into account. We don't want to disallow buffer moves
+		 * completely.
+		 */
+		if (p->bytes_moved <= p->bytes_moved_threshold)
+			domain = bo->prefered_domains;
+		else
+			domain = bo->allowed_domains;
+
+	retry:
+		amdgpu_ttm_placement_from_domain(bo, domain);
+		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+			       initial_bytes_moved;
+
+		if (unlikely(r)) {
+			if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
+				domain = bo->allowed_domains;
+				goto retry;
+			}
+			return r;
+		}
+
+		if (binding_userptr) {
+			drm_free_large(lobj->user_pages);
+			lobj->user_pages = NULL;
+		}
 	}
 	return 0;
 }
 
-static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
+static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+				union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_cs_buckets buckets;
+	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
 	bool need_mmap_lock = false;
-	int i, r;
+	unsigned i, tries = 10;
+	int r;
 
+	INIT_LIST_HEAD(&p->validated);
+
+	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 	if (p->bo_list) {
-		need_mmap_lock = p->bo_list->has_userptr;
-		amdgpu_cs_buckets_init(&buckets);
-		for (i = 0; i < p->bo_list->num_entries; i++)
-			amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
-								  p->bo_list->array[i].priority);
-
-		amdgpu_cs_buckets_get_list(&buckets, &p->validated);
+		need_mmap_lock = p->bo_list->first_userptr !=
+			p->bo_list->num_entries;
+		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 	}
 
 	INIT_LIST_HEAD(&duplicates);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
-	if (p->uf.bo)
+	if (p->job->uf.bo)
 		list_add(&p->uf_entry.tv.head, &p->validated);
 
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
-	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
-	if (unlikely(r != 0))
-		goto error_reserve;
+	while (1) {
+		struct list_head need_pages;
+		unsigned i;
+
+		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
+					   &duplicates);
+		if (unlikely(r != 0))
+			goto error_free_pages;
+
+		/* Without a BO list we don't have userptr BOs */
+		if (!p->bo_list)
+			break;
+
+		INIT_LIST_HEAD(&need_pages);
+		for (i = p->bo_list->first_userptr;
+		     i < p->bo_list->num_entries; ++i) {
+
+			e = &p->bo_list->array[i];
+
+			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+				 &e->user_invalidated) && e->user_pages) {
+
+				/* We acquired a page array, but somebody
+				 * invalidated it. Free it and try again.
+				 */
+				release_pages(e->user_pages,
+					      e->robj->tbo.ttm->num_pages,
+					      false);
+				drm_free_large(e->user_pages);
+				e->user_pages = NULL;
+			}
+
+			if (e->robj->tbo.ttm->state != tt_bound &&
+			    !e->user_pages) {
+				list_del(&e->tv.head);
+				list_add(&e->tv.head, &need_pages);
+
+				amdgpu_bo_unreserve(e->robj);
+			}
+		}
+
+		if (list_empty(&need_pages))
+			break;
+
+		/* Unreserve everything again. */
+		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+		/* We tried too often, just abort */
+		if (!--tries) {
+			r = -EDEADLK;
+			goto error_free_pages;
+		}
+
+		/* Fill the page arrays for all userptrs. */
+		list_for_each_entry(e, &need_pages, tv.head) {
+			struct ttm_tt *ttm = e->robj->tbo.ttm;
+
+			e->user_pages = drm_calloc_large(ttm->num_pages,
+							 sizeof(struct page*));
+			if (!e->user_pages) {
+				r = -ENOMEM;
+				goto error_free_pages;
+			}
+
+			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
+			if (r) {
+				drm_free_large(e->user_pages);
+				e->user_pages = NULL;
+				goto error_free_pages;
+			}
+		}
+
+		/* And try again. */
+		list_splice(&need_pages, &p->validated);
+	}
 
 	amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
 
-	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
+	p->bytes_moved = 0;
+
+	r = amdgpu_cs_list_validate(p, &duplicates);
 	if (r)
 		goto error_validate;
 
-	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+	r = amdgpu_cs_list_validate(p, &p->validated);
+	if (r)
+		goto error_validate;
+
+	if (p->bo_list) {
+		struct amdgpu_vm *vm = &fpriv->vm;
+		unsigned i;
+
+		for (i = 0; i < p->bo_list->num_entries; i++) {
+			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+
+			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
+		}
+	}
 
 error_validate:
 	if (r) {
@@ -433,10 +489,26 @@
 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 	}
 
-error_reserve:
+error_free_pages:
+
 	if (need_mmap_lock)
 		up_read(&current->mm->mmap_sem);
 
+	if (p->bo_list) {
+		for (i = p->bo_list->first_userptr;
+		     i < p->bo_list->num_entries; ++i) {
+			e = &p->bo_list->array[i];
+
+			if (!e->user_pages)
+				continue;
+
+			release_pages(e->user_pages,
+				      e->robj->tbo.ttm->num_pages,
+				      false);
+			drm_free_large(e->user_pages);
+		}
+	}
+
 	return r;
 }
 
@@ -447,7 +519,7 @@
 
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct reservation_object *resv = e->robj->tbo.resv;
-		r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);
+		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
 
 		if (r)
 			return r;
@@ -510,11 +582,8 @@
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (parser->ibs)
-		for (i = 0; i < parser->num_ibs; i++)
-			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-	kfree(parser->ibs);
-	amdgpu_bo_unref(&parser->uf.bo);
+	if (parser->job)
+		amdgpu_job_free(parser->job);
 	amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
@@ -530,7 +599,7 @@
 	if (r)
 		return r;
 
-	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
 	if (r)
 		return r;
 
@@ -556,14 +625,14 @@
 				return r;
 
 			f = bo_va->last_pt_update;
-			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+			r = amdgpu_sync_fence(adev, &p->job->sync, f);
 			if (r)
 				return r;
 		}
 
 	}
 
-	r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
 
 	if (amdgpu_vm_debug && p->bo_list) {
 		/* Invalidate all BOs to test for userspace bugs */
@@ -581,29 +650,25 @@
 }
 
 static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
-				 struct amdgpu_cs_parser *parser)
+				 struct amdgpu_cs_parser *p)
 {
-	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_ring *ring;
+	struct amdgpu_ring *ring = p->job->ring;
 	int i, r;
 
-	if (parser->num_ibs == 0)
-		return 0;
-
 	/* Only for UVD/VCE VM emulation */
-	for (i = 0; i < parser->num_ibs; i++) {
-		ring = parser->ibs[i].ring;
-		if (ring->funcs->parse_cs) {
-			r = amdgpu_ring_parse_cs(ring, parser, i);
+	if (ring->funcs->parse_cs) {
+		for (i = 0; i < p->job->num_ibs; i++) {
+			r = amdgpu_ring_parse_cs(ring, p, i);
 			if (r)
 				return r;
 		}
 	}
 
-	r = amdgpu_bo_vm_update_pte(parser, vm);
+	r = amdgpu_bo_vm_update_pte(p, vm);
 	if (!r)
-		amdgpu_cs_sync_rings(parser);
+		amdgpu_cs_sync_rings(p);
 
 	return r;
 }
@@ -626,14 +691,14 @@
 	int i, j;
 	int r;
 
-	for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
+	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 		struct amdgpu_cs_chunk *chunk;
 		struct amdgpu_ib *ib;
 		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 		struct amdgpu_ring *ring;
 
 		chunk = &parser->chunks[i];
-		ib = &parser->ibs[j];
+		ib = &parser->job->ibs[j];
 		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
 
 		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
@@ -645,6 +710,11 @@
 		if (r)
 			return r;
 
+		if (parser->job->ring && parser->job->ring != ring)
+			return -EINVAL;
+
+		parser->job->ring = ring;
+
 		if (ring->funcs->parse_cs) {
 			struct amdgpu_bo_va_mapping *m;
 			struct amdgpu_bo *aobj = NULL;
@@ -673,7 +743,7 @@
 			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
 			kptr += chunk_ib->va_start - offset;
 
-			r =  amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
+			r =  amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
 			if (r) {
 				DRM_ERROR("Failed to get ib !\n");
 				return r;
@@ -682,7 +752,7 @@
 			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 			amdgpu_bo_kunmap(aobj);
 		} else {
-			r =  amdgpu_ib_get(ring, vm, 0, ib);
+			r =  amdgpu_ib_get(adev, vm, 0, ib);
 			if (r) {
 				DRM_ERROR("Failed to get ib !\n");
 				return r;
@@ -697,15 +767,12 @@
 		j++;
 	}
 
-	if (!parser->num_ibs)
-		return 0;
-
 	/* add GDS resources to first IB */
 	if (parser->bo_list) {
 		struct amdgpu_bo *gds = parser->bo_list->gds_obj;
 		struct amdgpu_bo *gws = parser->bo_list->gws_obj;
 		struct amdgpu_bo *oa = parser->bo_list->oa_obj;
-		struct amdgpu_ib *ib = &parser->ibs[0];
+		struct amdgpu_ib *ib = &parser->job->ibs[0];
 
 		if (gds) {
 			ib->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -721,15 +788,15 @@
 		}
 	}
 	/* wrap the last IB with user fence */
-	if (parser->uf.bo) {
-		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
+	if (parser->job->uf.bo) {
+		struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
 
 		/* UVD & VCE fw doesn't support user fences */
-		if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
-		    ib->ring->type == AMDGPU_RING_TYPE_VCE)
+		if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
+		    parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
 			return -EINVAL;
 
-		ib->user = &parser->uf;
+		ib->user = &parser->job->uf;
 	}
 
 	return 0;
@@ -739,14 +806,8 @@
 				  struct amdgpu_cs_parser *p)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_ib *ib;
 	int i, j, r;
 
-	if (!p->num_ibs)
-		return 0;
-
-	/* Add dependencies to first IB */
-	ib = &p->ibs[0];
 	for (i = 0; i < p->nchunks; ++i) {
 		struct drm_amdgpu_cs_chunk_dep *deps;
 		struct amdgpu_cs_chunk *chunk;
@@ -784,7 +845,8 @@
 				return r;
 
 			} else if (fence) {
-				r = amdgpu_sync_fence(adev, &ib->sync, fence);
+				r = amdgpu_sync_fence(adev, &p->job->sync,
+						      fence);
 				fence_put(fence);
 				amdgpu_ctx_put(ctx);
 				if (r)
@@ -796,15 +858,36 @@
 	return 0;
 }
 
-static int amdgpu_cs_free_job(struct amdgpu_job *job)
+static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+			    union drm_amdgpu_cs *cs)
 {
-	int i;
-	if (job->ibs)
-		for (i = 0; i < job->num_ibs; i++)
-			amdgpu_ib_free(job->adev, &job->ibs[i]);
-	kfree(job->ibs);
-	if (job->uf.bo)
-		amdgpu_bo_unref(&job->uf.bo);
+	struct amdgpu_ring *ring = p->job->ring;
+	struct amd_sched_fence *fence;
+	struct amdgpu_job *job;
+
+	job = p->job;
+	p->job = NULL;
+
+	job->base.sched = &ring->sched;
+	job->base.s_entity = &p->ctx->rings[ring->idx].entity;
+	job->owner = p->filp;
+
+	fence = amd_sched_fence_create(job->base.s_entity, p->filp);
+	if (!fence) {
+		amdgpu_job_free(job);
+		return -ENOMEM;
+	}
+
+	job->base.s_fence = fence;
+	p->fence = fence_get(&fence->base);
+
+	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring,
+					      &fence->base);
+	job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
+
+	trace_amdgpu_cs_ioctl(job);
+	amd_sched_entity_push_job(&job->base);
+
 	return 0;
 }
 
@@ -829,7 +912,7 @@
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
-	r = amdgpu_cs_parser_relocs(&parser);
+	r = amdgpu_cs_parser_bos(&parser, data);
 	if (r == -ENOMEM)
 		DRM_ERROR("Not enough memory for command submission!\n");
 	else if (r && r != -ERESTARTSYS)
@@ -848,68 +931,14 @@
 	if (r)
 		goto out;
 
-	for (i = 0; i < parser.num_ibs; i++)
+	for (i = 0; i < parser.job->num_ibs; i++)
 		trace_amdgpu_cs(&parser, i);
 
 	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
 	if (r)
 		goto out;
 
-	if (amdgpu_enable_scheduler && parser.num_ibs) {
-		struct amdgpu_ring * ring = parser.ibs->ring;
-		struct amd_sched_fence *fence;
-		struct amdgpu_job *job;
-
-		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
-		if (!job) {
-			r = -ENOMEM;
-			goto out;
-		}
-
-		job->base.sched = &ring->sched;
-		job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
-		job->adev = parser.adev;
-		job->owner = parser.filp;
-		job->free_job = amdgpu_cs_free_job;
-
-		job->ibs = parser.ibs;
-		job->num_ibs = parser.num_ibs;
-		parser.ibs = NULL;
-		parser.num_ibs = 0;
-
-		if (job->ibs[job->num_ibs - 1].user) {
-			job->uf = parser.uf;
-			job->ibs[job->num_ibs - 1].user = &job->uf;
-			parser.uf.bo = NULL;
-		}
-
-		fence = amd_sched_fence_create(job->base.s_entity,
-					       parser.filp);
-		if (!fence) {
-			r = -ENOMEM;
-			amdgpu_cs_free_job(job);
-			kfree(job);
-			goto out;
-		}
-		job->base.s_fence = fence;
-		parser.fence = fence_get(&fence->base);
-
-		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
-						      &fence->base);
-		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
-
-		trace_amdgpu_cs_ioctl(job);
-		amd_sched_entity_push_job(&job->base);
-
-	} else {
-		struct amdgpu_fence *fence;
-
-		r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
-				       parser.filp);
-		fence = parser.ibs[parser.num_ibs - 1].fence;
-		parser.fence = fence_get(&fence->base);
-		cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
-	}
+	r = amdgpu_cs_submit(&parser, cs);
 
 out:
 	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
@@ -980,30 +1009,36 @@
 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		       uint64_t addr, struct amdgpu_bo **bo)
 {
-	struct amdgpu_bo_list_entry *reloc;
 	struct amdgpu_bo_va_mapping *mapping;
+	unsigned i;
+
+	if (!parser->bo_list)
+		return NULL;
 
 	addr /= AMDGPU_GPU_PAGE_SIZE;
 
-	list_for_each_entry(reloc, &parser->validated, tv.head) {
-		if (!reloc->bo_va)
+	for (i = 0; i < parser->bo_list->num_entries; i++) {
+		struct amdgpu_bo_list_entry *lobj;
+
+		lobj = &parser->bo_list->array[i];
+		if (!lobj->bo_va)
 			continue;
 
-		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
 			if (mapping->it.start > addr ||
 			    addr > mapping->it.last)
 				continue;
 
-			*bo = reloc->bo_va->bo;
+			*bo = lobj->bo_va->bo;
 			return mapping;
 		}
 
-		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
+		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
 			if (mapping->it.start > addr ||
 			    addr > mapping->it.last)
 				continue;
 
-			*bo = reloc->bo_va->bo;
+			*bo = lobj->bo_va->bo;
 			return mapping;
 		}
 	}
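
The heart of the new amdgpu_cs_parser_bos() is a bounded optimistic-retry loop: reserve every BO, back off if any userptr BO still needs its pages, fault the pages in outside the reservation, and retry up to ten times before giving up with -EDEADLK. Reduced to its control flow, with hypothetical helpers standing in for the ttm_eu and userptr calls:

    #include <linux/errno.h>
    #include <linux/list.h>

    struct parser;  /* stand-in for struct amdgpu_cs_parser */

    /* Hypothetical helpers; the driver uses ttm_eu_reserve_buffers(),
     * ttm_eu_backoff_reservation() and amdgpu_ttm_tt_get_user_pages(). */
    int reserve_all(struct parser *p);
    void unreserve_all(struct parser *p);
    void collect_unbound_userptrs(struct parser *p, struct list_head *out);
    int pin_user_pages_for(struct list_head *need_pages);

    static int reserve_with_userptrs(struct parser *p)
    {
            unsigned int tries = 10;
            int r;

            for (;;) {
                    LIST_HEAD(need_pages);

                    r = reserve_all(p);
                    if (r)
                            return r;

                    collect_unbound_userptrs(p, &need_pages);
                    if (list_empty(&need_pages))
                            return 0;  /* all reserved, all pages bound */

                    /* Page faults can't be serviced while holding the
                     * reservations, so drop them before pinning. */
                    unreserve_all(p);

                    if (!--tries)
                            return -EDEADLK; /* kept losing to invalidation */

                    r = pin_user_pages_for(&need_pages);
                    if (r)
                            return r;
            }
    }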
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17d1fb1..17e1362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,8 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
-		    struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
@@ -35,44 +34,38 @@
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
-	ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
-			 AMDGPU_MAX_RINGS, GFP_KERNEL);
+	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
+			      sizeof(struct fence*), GFP_KERNEL);
 	if (!ctx->fences)
 		return -ENOMEM;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		ctx->rings[i].sequence = 1;
-		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
-			amdgpu_sched_jobs * i;
+		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
 	}
-	if (amdgpu_enable_scheduler) {
-		/* create context entity for each ring */
-		for (i = 0; i < adev->num_rings; i++) {
-			struct amd_sched_rq *rq;
-			if (pri >= AMD_SCHED_MAX_PRIORITY) {
-				kfree(ctx->fences);
-				return -EINVAL;
-			}
-			rq = &adev->rings[i]->sched.sched_rq[pri];
-			r = amd_sched_entity_init(&adev->rings[i]->sched,
-						  &ctx->rings[i].entity,
-						  rq, amdgpu_sched_jobs);
-			if (r)
-				break;
-		}
+	/* create context entity for each ring */
+	for (i = 0; i < adev->num_rings; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		struct amd_sched_rq *rq;
 
-		if (i < adev->num_rings) {
-			for (j = 0; j < i; j++)
-				amd_sched_entity_fini(&adev->rings[j]->sched,
-						      &ctx->rings[j].entity);
-			kfree(ctx->fences);
-			return r;
-		}
+		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
+					  rq, amdgpu_sched_jobs);
+		if (r)
+			break;
+	}
+
+	if (i < adev->num_rings) {
+		for (j = 0; j < i; j++)
+			amd_sched_entity_fini(&adev->rings[j]->sched,
+					      &ctx->rings[j].entity);
+		kfree(ctx->fences);
+		return r;
 	}
 	return 0;
 }
 
-void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned i, j;
@@ -85,11 +78,9 @@
 			fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
 
-	if (amdgpu_enable_scheduler) {
-		for (i = 0; i < adev->num_rings; i++)
-			amd_sched_entity_fini(&adev->rings[i]->sched,
-					      &ctx->rings[i].entity);
-	}
+	for (i = 0; i < adev->num_rings; i++)
+		amd_sched_entity_fini(&adev->rings[i]->sched,
+				      &ctx->rings[i].entity);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -112,7 +103,7 @@
 		return r;
 	}
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
+	r = amdgpu_ctx_init(adev, ctx);
 	if (r) {
 		idr_remove(&mgr->ctx_handles, *id);
 		*id = 0;
@@ -200,18 +191,18 @@
 	id = args->in.ctx_id;
 
 	switch (args->in.op) {
-		case AMDGPU_CTX_OP_ALLOC_CTX:
-			r = amdgpu_ctx_alloc(adev, fpriv, &id);
-			args->out.alloc.ctx_id = id;
-			break;
-		case AMDGPU_CTX_OP_FREE_CTX:
-			r = amdgpu_ctx_free(fpriv, id);
-			break;
-		case AMDGPU_CTX_OP_QUERY_STATE:
-			r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
-			break;
-		default:
-			return -EINVAL;
+	case AMDGPU_CTX_OP_ALLOC_CTX:
+		r = amdgpu_ctx_alloc(adev, fpriv, &id);
+		args->out.alloc.ctx_id = id;
+		break;
+	case AMDGPU_CTX_OP_FREE_CTX:
+		r = amdgpu_ctx_free(fpriv, id);
+		break;
+	case AMDGPU_CTX_OP_QUERY_STATE:
+		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	return r;
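
A side note on the context rework above: amdgpu_ctx_init() now allocates one flat array with kcalloc() and hands each ring a slice of it, replacing the earlier void-pointer arithmetic. A compilable model of the slicing, with made-up sizes standing in for amdgpu_sched_jobs and AMDGPU_MAX_RINGS:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_RINGS  16    /* stand-in for AMDGPU_MAX_RINGS */
    #define SCHED_JOBS 32    /* stand-in for amdgpu_sched_jobs */

    struct fence;            /* opaque, only pointers are stored */

    int main(void)
    {
        struct fence **fences, **ring_fences[MAX_RINGS];
        unsigned i;

        /* One zeroed allocation, as kcalloc() does in the kernel. */
        fences = calloc(SCHED_JOBS * MAX_RINGS, sizeof(*fences));
        if (!fences)
            return 1;

        /* Each ring gets its own SCHED_JOBS-sized window into the array. */
        for (i = 0; i < MAX_RINGS; i++)
            ring_fences[i] = &fences[SCHED_JOBS * i];

        printf("ring 3 slice starts at slot %td\n", ring_fences[3] - fences);
        free(fences);
        return 0;
    }
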
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 51bfc11..61211747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,6 +62,12 @@
 	"LAST",
 };
 
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
 bool amdgpu_device_is_px(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
@@ -636,31 +642,6 @@
 }
 
 /**
- * amdgpu_boot_test_post_card - check and possibly initialize the hw
- *
- * @adev: amdgpu_device pointer
- *
- * Check if the asic is initialized and if not, attempt to initialize
- * it (all asics).
- * Returns true if initialized or false if not.
- */
-bool amdgpu_boot_test_post_card(struct amdgpu_device *adev)
-{
-	if (amdgpu_card_posted(adev))
-		return true;
-
-	if (adev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		if (adev->is_atom_bios)
-			amdgpu_atom_asic_init(adev->mode_info.atom_context);
-		return true;
-	} else {
-		dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
-		return false;
-	}
-}
-
-/**
  * amdgpu_dummy_page_init - init dummy page used by the driver
  *
  * @adev: amdgpu_device pointer
@@ -959,12 +940,6 @@
 			 amdgpu_sched_jobs);
 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
 	}
-	/* vramlimit must be a power of two */
-	if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
-		dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
-				amdgpu_vram_limit);
-		amdgpu_vram_limit = 0;
-	}
 
 	if (amdgpu_gart_size != -1) {
 		/* gtt size must be power of two and greater or equal to 32M */
@@ -1434,7 +1409,7 @@
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
-	adev->vm_manager.vm_pte_funcs_ring = NULL;
+	adev->vm_manager.vm_pte_num_rings = 0;
 	adev->gart.gart_funcs = NULL;
 	adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
 
@@ -1455,9 +1430,8 @@
 
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
-	mutex_init(&adev->ring_lock);
+	mutex_init(&adev->vm_manager.lock);
 	atomic_set(&adev->irq.ih.lock, 0);
-	mutex_init(&adev->gem.mutex);
 	mutex_init(&adev->pm.mutex);
 	mutex_init(&adev->gfx.gpu_clock_mutex);
 	mutex_init(&adev->srbm_mutex);
@@ -1511,7 +1485,7 @@
 
 	if (amdgpu_runtime_pm == 1)
 		runtime = true;
-	if (amdgpu_device_is_px(ddev))
+	if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
 		runtime = true;
 	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
 	if (runtime)
@@ -1531,8 +1505,13 @@
 		return r;
 	}
 
+	/* See if the asic supports SR-IOV */
+	adev->virtualization.supports_sr_iov =
+		amdgpu_atombios_has_gpu_virtualization_table(adev);
+
 	/* Post card if necessary */
-	if (!amdgpu_card_posted(adev)) {
+	if (!amdgpu_card_posted(adev) ||
+	    adev->virtualization.supports_sr_iov) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
@@ -1577,11 +1556,6 @@
 		return r;
 	}
 
-	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
-	if (r) {
-		dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
-		return r;
-	}
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1645,7 +1619,6 @@
 	adev->shutdown = true;
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
-	amdgpu_ctx_fini(&adev->kernel_ctx);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
@@ -1894,6 +1867,9 @@
 
 retry:
 	r = amdgpu_asic_reset(adev);
+	/* post card */
+	amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
 	if (!r) {
 		dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
 		r = amdgpu_resume(adev);
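
The reset hunk above re-posts the card through the ATOM init tables after every ASIC reset attempt, before trying to resume. Roughly, the control flow looks like the sketch below; asic_reset(), post_card() and resume() are stand-ins for the driver calls, and the retry policy is simplified:

    #include <stdbool.h>
    #include <stdio.h>

    static int tries;

    static bool asic_reset(void) { return ++tries >= 2; } /* succeeds on try 2 */
    static void post_card(void)  { puts("re-posting card via ATOM tables"); }
    static bool resume(void)     { return true; }

    int main(void)
    {
        bool ok;

        do {
            ok = asic_reset();
            post_card();        /* always re-post, even after a failed reset */
            if (ok)
                ok = resume();
        } while (!ok);

        printf("recovered after %d reset attempt(s)\n", tries);
        return 0;
    }
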
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 1846d65..f0ed974 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,32 +35,30 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
-static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
-				   struct fence **f)
+static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
 {
-	struct amdgpu_fence *fence;
-	long r;
+	struct amdgpu_flip_work *work =
+		container_of(cb, struct amdgpu_flip_work, cb);
 
-	if (*f == NULL)
-		return;
+	fence_put(f);
+	schedule_work(&work->flip_work);
+}
 
-	fence = to_amdgpu_fence(*f);
-	if (fence) {
-		r = fence_wait(&fence->base, false);
-		if (r == -EDEADLK)
-			r = amdgpu_gpu_reset(adev);
-	} else
-		r = fence_wait(*f, false);
+static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
+				     struct fence **f)
+{
+	struct fence *fence = *f;
 
-	if (r)
-		DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
+	if (fence == NULL)
+		return false;
 
-	/* We continue with the page flip even if we failed to wait on
-	 * the fence, otherwise the DRM core and userspace will be
-	 * confused about which BO the CRTC is scanning out
-	 */
-	fence_put(*f);
 	*f = NULL;
+
+	if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+		return true;
+
+	fence_put(fence);
+	return false;
 }
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
@@ -76,9 +74,12 @@
 	int vpos, hpos, stat, min_udelay = 0;
 	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
-	amdgpu_flip_wait_fence(adev, &work->excl);
+	if (amdgpu_flip_handle_fence(work, &work->excl))
+		return;
+
 	for (i = 0; i < work->shared_count; ++i)
-		amdgpu_flip_wait_fence(adev, &work->shared[i]);
+		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
+			return;
 
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -130,12 +131,12 @@
 				 vblank->framedur_ns / 1000,
 				 vblank->linedur_ns / 1000, stat, vpos, hpos);
 
-	/* do the flip (mmio) */
-	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */
 	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
-
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	/* Do the flip (mmio) */
+	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 }
 
 /*
@@ -254,7 +255,7 @@
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	queue_work(amdgpu_crtc->pflip_queue, &work->flip_work);
+	amdgpu_flip_work_func(&work->flip_work);
 	return 0;
 
 vblank_cleanup:
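
The page-flip rework above replaces the blocking fence wait with fence_add_callback(): if a fence is still pending, the callback reschedules the flip work and the function bails out early. A small standalone model of that decision, with a toy fence type in place of struct fence:

    #include <stdbool.h>
    #include <stdio.h>

    struct fence { bool signaled; };

    /* Mimics fence_add_callback(): 0 if the callback was installed,
     * nonzero if the fence had already signaled. */
    static int add_callback(struct fence *f)
    {
        return f->signaled ? -1 : 0;
    }

    /* True means "wait for the callback to restart the flip work". */
    static bool handle_fence(struct fence **f)
    {
        struct fence *fence = *f;

        if (!fence)
            return false;
        *f = NULL;
        if (!add_callback(fence))
            return true;        /* still pending: defer to the callback */
        return false;           /* already signaled: continue inline */
    }

    int main(void)
    {
        struct fence done = { .signaled = true };
        struct fence *p = &done;

        printf("deferred: %s\n", handle_fence(&p) ? "yes" : "no");
        return 0;
    }
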
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9ef1db8..f1e17d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -69,7 +69,6 @@
 int amdgpu_smc_load_fw = 1;
 int amdgpu_aspm = -1;
 int amdgpu_runtime_pm = -1;
-int amdgpu_hard_reset = 0;
 unsigned amdgpu_ip_block_mask = 0xffffffff;
 int amdgpu_bapm = -1;
 int amdgpu_deep_color = 0;
@@ -78,10 +77,8 @@
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
 int amdgpu_exp_hw_support = 0;
-int amdgpu_enable_scheduler = 1;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_enable_semaphores = 0;
 int amdgpu_powerplay = -1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
@@ -128,9 +125,6 @@
 MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
 
-MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
-module_param_named(hard_reset, amdgpu_hard_reset, int, 0444);
-
 MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
 module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
 
@@ -155,18 +149,12 @@
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
-module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
-
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
-MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
-module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
-
 #ifdef CONFIG_DRM_AMD_POWERPLAY
 MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);
@@ -330,6 +318,14 @@
 		return -ENODEV;
 	}
 
+	/*
+	 * Initialize amdkfd before starting amdgpu. If it was not loaded yet,
+	 * defer amdgpu probing
+	 */
+	ret = amdgpu_amdkfd_init();
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
 	/* Get rid of things like offb */
 	ret = amdgpu_kick_out_firmware_fb(pdev);
 	if (ret)
@@ -559,6 +555,7 @@
 
 static int __init amdgpu_init(void)
 {
+	amdgpu_sync_init();
 #ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
@@ -572,8 +569,6 @@
 	driver->num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
 
-	amdgpu_amdkfd_init();
-
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
 }
@@ -583,6 +578,7 @@
 	amdgpu_amdkfd_fini();
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
+	amdgpu_sync_fini();
 }
 
 module_init(amdgpu_init);
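
Moving amdgpu_amdkfd_init() from module init into the PCI probe path matters because -EPROBE_DEFER only has meaning when returned from probe, where the driver core will retry later. A tiny model of the pattern; kfd_ready is a stand-in for "the amdkfd module is loaded", and 517 is the kernel's EPROBE_DEFER errno value:

    #include <stdio.h>

    #define EPROBE_DEFER 517        /* the kernel's errno value */

    static int kfd_ready;           /* stand-in for "amdkfd is loaded" */

    static int amdkfd_init(void) { return kfd_ready ? 0 : -EPROBE_DEFER; }

    static int pci_probe(void)
    {
        int ret = amdkfd_init();

        if (ret == -EPROBE_DEFER)
            return ret;             /* driver core retries probe later */
        return 0;
    }

    int main(void)
    {
        printf("first probe:  %d\n", pci_probe());
        kfd_ready = 1;              /* dependency shows up */
        printf("second probe: %d\n", pci_probe());
        return 0;
    }
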
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3671f9f..4303b44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,9 +47,30 @@
  * that the relevant GPU caches have been flushed.
  */
 
+struct amdgpu_fence {
+	struct fence base;
+
+	/* RB, DMA, etc. */
+	struct amdgpu_ring		*ring;
+};
+
 static struct kmem_cache *amdgpu_fence_slab;
 static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
 
+/*
+ * Cast helper
+ */
+static const struct fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+{
+	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
+
+	if (__f->base.ops == &amdgpu_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
 /**
  * amdgpu_fence_write - write a fence value
  *
@@ -82,7 +103,7 @@
 	if (drv->cpu_addr)
 		seq = le32_to_cpu(*drv->cpu_addr);
 	else
-		seq = lower_32_bits(atomic64_read(&drv->last_seq));
+		seq = atomic_read(&drv->last_seq);
 
 	return seq;
 }
@@ -91,32 +112,41 @@
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
  * @ring: ring the fence is associated with
- * @owner: creator of the fence
- * @fence: amdgpu fence object
+ * @f: resulting fence object
  *
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
-		      struct amdgpu_fence **fence)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_fence *fence;
+	struct fence **ptr;
+	uint32_t seq;
 
-	/* we are protected by the ring emission mutex */
-	*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
-	if ((*fence) == NULL) {
+	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
+	if (fence == NULL)
 		return -ENOMEM;
-	}
-	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
-	(*fence)->ring = ring;
-	(*fence)->owner = owner;
-	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		&ring->fence_drv.fence_queue.lock,
-		adev->fence_context + ring->idx,
-		(*fence)->seq);
+
+	seq = ++ring->fence_drv.sync_seq;
+	fence->ring = ring;
+	fence_init(&fence->base, &amdgpu_fence_ops,
+		   &ring->fence_drv.lock,
+		   adev->fence_context + ring->idx,
+		   seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
-			       (*fence)->seq,
-			       AMDGPU_FENCE_FLAG_INT);
+			       seq, AMDGPU_FENCE_FLAG_INT);
+
+	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+	/* This function can't be called concurrently anyway, otherwise
+	 * emitting the fence would mess up the hardware ring buffer.
+	 */
+	BUG_ON(rcu_dereference_protected(*ptr, 1));
+
+	rcu_assign_pointer(*ptr, fence_get(&fence->base));
+
+	*f = &fence->base;
+
 	return 0;
 }
 
@@ -134,89 +164,48 @@
 }
 
 /**
- * amdgpu_fence_activity - check for fence activity
+ * amdgpu_fence_process - check for fence activity
  *
  * @ring: pointer to struct amdgpu_ring
  *
  * Checks the current fence value and calculates the last
- * signalled fence value. Returns true if activity occured
- * on the ring, and the fence_queue should be waken up.
- */
-static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
-{
-	uint64_t seq, last_seq, last_emitted;
-	unsigned count_loop = 0;
-	bool wake = false;
-
-	/* Note there is a scenario here for an infinite loop but it's
-	 * very unlikely to happen. For it to happen, the current polling
-	 * process need to be interrupted by another process and another
-	 * process needs to update the last_seq btw the atomic read and
-	 * xchg of the current process.
-	 *
-	 * More over for this to go in infinite loop there need to be
-	 * continuously new fence signaled ie amdgpu_fence_read needs
-	 * to return a different value each time for both the currently
-	 * polling process and the other process that xchg the last_seq
-	 * btw atomic read and xchg of the current process. And the
-	 * value the other process set as last seq must be higher than
-	 * the seq value we just read. Which means that current process
-	 * need to be interrupted after amdgpu_fence_read and before
-	 * atomic xchg.
-	 *
-	 * To be even more safe we count the number of time we loop and
-	 * we bail after 10 loop just accepting the fact that we might
-	 * have temporarly set the last_seq not to the true real last
-	 * seq but to an older one.
-	 */
-	last_seq = atomic64_read(&ring->fence_drv.last_seq);
-	do {
-		last_emitted = ring->fence_drv.sync_seq[ring->idx];
-		seq = amdgpu_fence_read(ring);
-		seq |= last_seq & 0xffffffff00000000LL;
-		if (seq < last_seq) {
-			seq &= 0xffffffff;
-			seq |= last_emitted & 0xffffffff00000000LL;
-		}
-
-		if (seq <= last_seq || seq > last_emitted) {
-			break;
-		}
-		/* If we loop over we don't want to return without
-		 * checking if a fence is signaled as it means that the
-		 * seq we just read is different from the previous on.
-		 */
-		wake = true;
-		last_seq = seq;
-		if ((count_loop++) > 10) {
-			/* We looped over too many time leave with the
-			 * fact that we might have set an older fence
-			 * seq then the current real last seq as signaled
-			 * by the hw.
-			 */
-			break;
-		}
-	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
-
-	if (seq < last_emitted)
-		amdgpu_fence_schedule_fallback(ring);
-
-	return wake;
-}
-
-/**
- * amdgpu_fence_process - process a fence
- *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
- *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * signalled fence value. Signals all fences up to the
+ * last signalled sequence number.
  */
 void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->fence_drv.fence_queue);
+	struct amdgpu_fence_driver *drv = &ring->fence_drv;
+	uint32_t seq, last_seq;
+	int r;
+
+	do {
+		last_seq = atomic_read(&ring->fence_drv.last_seq);
+		seq = amdgpu_fence_read(ring);
+
+	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
+
+	if (seq != ring->fence_drv.sync_seq)
+		amdgpu_fence_schedule_fallback(ring);
+
+	while (last_seq != seq) {
+		struct fence *fence, **ptr;
+
+		ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+
+		/* There is always exactly one thread signaling this fence slot */
+		fence = rcu_dereference_protected(*ptr, 1);
+		rcu_assign_pointer(*ptr, NULL);
+
+		BUG_ON(!fence);
+
+		r = fence_signal(fence);
+		if (!r)
+			FENCE_TRACE(fence, "signaled from irq context\n");
+		else
+			BUG();
+
+		fence_put(fence);
+	}
 }
 
 /**
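
The new amdgpu_fence_process() above reduces the old bookkeeping to a 32-bit sequence race resolved by atomic_cmpxchg(), then signals every slot between the old and new sequence in a power-of-two ring of fence pointers. A single-threaded sketch of the slot walk, with the atomics and RCU elided and strings standing in for fence objects:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_FENCES 8                  /* power of two, as the driver enforces */
    #define SEQ_MASK   (NUM_FENCES - 1)

    static const char *fences[NUM_FENCES];

    static void emit(uint32_t seq, const char *name)
    {
        fences[seq & SEQ_MASK] = name;    /* one slot per in-flight submission */
    }

    static void process(uint32_t last_seq, uint32_t seq)
    {
        while (last_seq != seq) {
            unsigned slot = ++last_seq & SEQ_MASK;

            printf("signaling %s (seq %u)\n", fences[slot], (unsigned)last_seq);
            fences[slot] = NULL;          /* exactly one thread clears a slot */
        }
    }

    int main(void)
    {
        emit(1, "ib-A");
        emit(2, "ib-B");
        process(0, 2);                    /* hardware reported seq == 2 */
        return 0;
    }
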
@@ -234,83 +223,6 @@
 }
 
 /**
- * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
- *
- * @ring: ring the fence is associated with
- * @seq: sequence number
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if the fence has signaled (current fence value
- * is >= requested value) or false if it has not (current fence
- * value is < the requested value.  Helper function for
- * amdgpu_fence_signaled().
- */
-static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
-{
-	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-		return true;
-
-	/* poll new last sequence at least once */
-	amdgpu_fence_process(ring);
-	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-		return true;
-
-	return false;
-}
-
-/*
- * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal
- * @ring: ring to wait on for the seq number
- * @seq: seq number wait for
- *
- * return value:
- * 0: seq signaled, and gpu not hang
- * -EDEADL: GPU hang detected
- * -EINVAL: some paramter is not valid
- */
-static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
-{
-	bool signaled = false;
-
-	BUG_ON(!ring);
-	if (seq > ring->fence_drv.sync_seq[ring->idx])
-		return -EINVAL;
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-		return 0;
-
-	amdgpu_fence_schedule_fallback(ring);
-	wait_event(ring->fence_drv.fence_queue, (
-		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));
-
-	if (signaled)
-		return 0;
-	else
-		return -EDEADLK;
-}
-
-/**
- * amdgpu_fence_wait_next - wait for the next fence to signal
- *
- * @adev: amdgpu device pointer
- * @ring: ring index the fence is associated with
- *
- * Wait for the next fence on the requested ring to signal (all asics).
- * Returns 0 if the next fence has passed, error for all other cases.
- * Caller must hold ring lock.
- */
-int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
-{
-	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-
-	if (seq >= ring->fence_drv.sync_seq[ring->idx])
-		return -ENOENT;
-
-	return amdgpu_fence_ring_wait_seq(ring, seq);
-}
-
-/**
  * amdgpu_fence_wait_empty - wait for all fences to signal
  *
  * @ring: ring to wait for the fences on
  *
  * Wait for all fences on the requested ring to signal (all asics).
  * Returns 0 if the fences have passed, error for all other cases.
- * Caller must hold ring lock.
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
+	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+	struct fence *fence, **ptr;
+	int r;
 
 	if (!seq)
 		return 0;
 
-	return amdgpu_fence_ring_wait_seq(ring, seq);
+	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+	rcu_read_lock();
+	fence = rcu_dereference(*ptr);
+	if (!fence || !fence_get_rcu(fence)) {
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+
+	r = fence_wait(fence, false);
+	fence_put(fence);
+	return r;
 }
 
 /**
@@ -347,75 +271,10 @@
 	 * but it's ok to report slightly wrong fence count here.
 	 */
 	amdgpu_fence_process(ring);
-	emitted = ring->fence_drv.sync_seq[ring->idx]
-		- atomic64_read(&ring->fence_drv.last_seq);
-	/* to avoid 32bits warp around */
-	if (emitted > 0x10000000)
-		emitted = 0x10000000;
-
-	return (unsigned)emitted;
-}
-
-/**
- * amdgpu_fence_need_sync - do we need a semaphore
- *
- * @fence: amdgpu fence object
- * @dst_ring: which ring to check against
- *
- * Check if the fence needs to be synced against another ring
- * (all asics).  If so, we need to emit a semaphore.
- * Returns true if we need to sync with another ring, false if
- * not.
- */
-bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
-			    struct amdgpu_ring *dst_ring)
-{
-	struct amdgpu_fence_driver *fdrv;
-
-	if (!fence)
-		return false;
-
-	if (fence->ring == dst_ring)
-		return false;
-
-	/* we are protected by the ring mutex */
-	fdrv = &dst_ring->fence_drv;
-	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
-		return false;
-
-	return true;
-}
-
-/**
- * amdgpu_fence_note_sync - record the sync point
- *
- * @fence: amdgpu fence object
- * @dst_ring: which ring to check against
- *
- * Note the sequence number at which point the fence will
- * be synced with the requested ring (all asics).
- */
-void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
-			    struct amdgpu_ring *dst_ring)
-{
-	struct amdgpu_fence_driver *dst, *src;
-	unsigned i;
-
-	if (!fence)
-		return;
-
-	if (fence->ring == dst_ring)
-		return;
-
-	/* we are protected by the ring mutex */
-	src = &fence->ring->fence_drv;
-	dst = &dst_ring->fence_drv;
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (i == dst_ring->idx)
-			continue;
-
-		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
-	}
+	emitted = 0x100000000ull;
+	emitted -= atomic_read(&ring->fence_drv.last_seq);
+	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+	return lower_32_bits(emitted);
 }
 
 /**
@@ -447,7 +306,7 @@
 		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
 		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
 	}
-	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
+	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
 	amdgpu_irq_get(adev, irq_src, irq_type);
 
 	ring->fence_drv.irq_src = irq_src;
@@ -465,47 +324,55 @@
  * for the requested ring.
  *
  * @ring: ring to init the fence driver on
+ * @num_hw_submission: number of entries on the hardware queue
  *
  * Init the fence driver for the requested ring (all asics).
  * Helper function for amdgpu_fence_driver_init().
  */
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+				  unsigned num_hw_submission)
 {
-	int i, r;
+	long timeout;
+	int r;
+
+	/* Check that num_hw_submission is a power of two */
+	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
+		return -EINVAL;
 
 	ring->fence_drv.cpu_addr = NULL;
 	ring->fence_drv.gpu_addr = 0;
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		ring->fence_drv.sync_seq[i] = 0;
-
-	atomic64_set(&ring->fence_drv.last_seq, 0);
+	ring->fence_drv.sync_seq = 0;
+	atomic_set(&ring->fence_drv.last_seq, 0);
 	ring->fence_drv.initialized = false;
 
 	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
 		    (unsigned long)ring);
 
-	init_waitqueue_head(&ring->fence_drv.fence_queue);
+	ring->fence_drv.num_fences_mask = num_hw_submission - 1;
+	spin_lock_init(&ring->fence_drv.lock);
+	ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
+					 GFP_KERNEL);
+	if (!ring->fence_drv.fences)
+		return -ENOMEM;
 
-	if (amdgpu_enable_scheduler) {
-		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
-		if (timeout == 0) {
-			/*
-			 * FIXME:
-			 * Delayed workqueue cannot use it directly,
-			 * so the scheduler will not use delayed workqueue if
-			 * MAX_SCHEDULE_TIMEOUT is set.
-			 * Currently keep it simple and silly.
-			 */
-			timeout = MAX_SCHEDULE_TIMEOUT;
-		}
-		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-				   amdgpu_sched_hw_submission,
-				   timeout, ring->name);
-		if (r) {
-			DRM_ERROR("Failed to create scheduler on ring %s.\n",
-				  ring->name);
-			return r;
-		}
+	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+	if (timeout == 0) {
+		/*
+		 * FIXME:
+		 * Delayed workqueue cannot use it directly,
+		 * so the scheduler will not use delayed workqueue if
+		 * MAX_SCHEDULE_TIMEOUT is set.
+		 * Currently keep it simple and silly.
+		 */
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	}
+	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+			   num_hw_submission,
+			   timeout, ring->name);
+	if (r) {
+		DRM_ERROR("Failed to create scheduler on ring %s.\n",
+			  ring->name);
+		return r;
 	}
 
 	return 0;
@@ -548,11 +415,9 @@
  */
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 {
-	int i, r;
+	unsigned i, j;
+	int r;
 
-	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
-		kmem_cache_destroy(amdgpu_fence_slab);
-	mutex_lock(&adev->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 
@@ -563,14 +428,18 @@
 			/* no need to trigger GPU reset as we are unloading */
 			amdgpu_fence_driver_force_completion(adev);
 		}
-		wake_up_all(&ring->fence_drv.fence_queue);
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 		amd_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
+		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+			fence_put(ring->fence_drv.fences[j]);
+		kfree(ring->fence_drv.fences);
 		ring->fence_drv.initialized = false;
 	}
-	mutex_unlock(&adev->ring_lock);
+
+	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+		kmem_cache_destroy(amdgpu_fence_slab);
 }
 
 /**
@@ -585,7 +454,6 @@
 {
 	int i, r;
 
-	mutex_lock(&adev->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		if (!ring || !ring->fence_drv.initialized)
@@ -602,7 +470,6 @@
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 	}
-	mutex_unlock(&adev->ring_lock);
 }
 
 /**
@@ -621,7 +488,6 @@
 {
 	int i;
 
-	mutex_lock(&adev->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		if (!ring || !ring->fence_drv.initialized)
@@ -631,7 +497,6 @@
 		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 	}
-	mutex_unlock(&adev->ring_lock);
 }
 
 /**
@@ -651,7 +516,7 @@
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 
-		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
+		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
 	}
 }
 
@@ -671,66 +536,6 @@
 }
 
 /**
- * amdgpu_fence_is_signaled - test if fence is signaled
- *
- * @f: fence to test
- *
- * Test the fence sequence number if it is already signaled. If it isn't
- * signaled start fence processing. Returns True if the fence is signaled.
- */
-static bool amdgpu_fence_is_signaled(struct fence *f)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_ring *ring = fence->ring;
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return true;
-
-	amdgpu_fence_process(ring);
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return true;
-
-	return false;
-}
-
-/**
- * amdgpu_fence_check_signaled - callback from fence_queue
- *
- * this function is called with fence_queue lock held, which is also used
- * for the fence locking itself, so unlocked variants are used for
- * fence_signal, and remove_wait_queue.
- */
-static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
-{
-	struct amdgpu_fence *fence;
-	struct amdgpu_device *adev;
-	u64 seq;
-	int ret;
-
-	fence = container_of(wait, struct amdgpu_fence, fence_wake);
-	adev = fence->ring->adev;
-
-	/*
-	 * We cannot use amdgpu_fence_process here because we're already
-	 * in the waitqueue, in a call from wake_up_all.
-	 */
-	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
-	if (seq >= fence->seq) {
-		ret = fence_signal_locked(&fence->base);
-		if (!ret)
-			FENCE_TRACE(&fence->base, "signaled from irq context\n");
-		else
-			FENCE_TRACE(&fence->base, "was already signaled\n");
-
-		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
-		fence_put(&fence->base);
-	} else
-		FENCE_TRACE(&fence->base, "pending\n");
-	return 0;
-}
-
-/**
  * amdgpu_fence_enable_signaling - enable signalling on fence
  * @fence: fence
  *
@@ -743,31 +548,45 @@
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
 
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return false;
-
-	fence->fence_wake.flags = 0;
-	fence->fence_wake.private = NULL;
-	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
-	fence_get(f);
 	if (!timer_pending(&ring->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(ring);
+
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+
 	return true;
 }
 
-static void amdgpu_fence_release(struct fence *f)
+/**
+ * amdgpu_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the fence memory after the RCU grace period.
+ */
+static void amdgpu_fence_free(struct rcu_head *rcu)
 {
+	struct fence *f = container_of(rcu, struct fence, rcu);
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	kmem_cache_free(amdgpu_fence_slab, fence);
 }
 
-const struct fence_ops amdgpu_fence_ops = {
+/**
+ * amdgpu_fence_release - callback run when the fence can be freed
+ *
+ * @f: fence to release
+ *
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
+ */
+static void amdgpu_fence_release(struct fence *f)
+{
+	call_rcu(&f->rcu, amdgpu_fence_free);
+}
+
+static const struct fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
-	.signaled = amdgpu_fence_is_signaled,
 	.wait = fence_default_wait,
 	.release = amdgpu_fence_release,
 };
@@ -781,7 +600,7 @@
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
-	int i, j;
+	int i;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -791,31 +610,41 @@
 		amdgpu_fence_process(ring);
 
 		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
-		seq_printf(m, "Last signaled fence 0x%016llx\n",
-			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
-		seq_printf(m, "Last emitted        0x%016llx\n",
-			   ring->fence_drv.sync_seq[i]);
-
-		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
-			struct amdgpu_ring *other = adev->rings[j];
-			if (i != j && other && other->fence_drv.initialized &&
-			    ring->fence_drv.sync_seq[j])
-				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
-					   j, ring->fence_drv.sync_seq[j]);
-		}
+		seq_printf(m, "Last signaled fence 0x%08x\n",
+			   atomic_read(&ring->fence_drv.last_seq));
+		seq_printf(m, "Last emitted        0x%08x\n",
+			   ring->fence_drv.sync_seq);
 	}
 	return 0;
 }
 
+/**
+ * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
+ *
+ * Manually trigger an immediate gpu reset.
+ */
+static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	seq_printf(m, "gpu reset\n");
+	amdgpu_gpu_reset(adev);
+
+	return 0;
+}
+
 static struct drm_info_list amdgpu_debugfs_fence_list[] = {
 	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
+	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
 };
 #endif
 
 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
+	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
 #else
 	return 0;
 #endif
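
The release path above defers the actual kmem_cache_free() through call_rcu(), so concurrent readers doing fence_get_rcu() under rcu_read_lock() can never touch freed memory. The container_of() recovery from the embedded rcu_head is the load-bearing trick; modeled below with a synchronous stand-in for call_rcu(), since a real grace period needs the kernel:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head { void (*func)(struct rcu_head *); };
    struct fence    { int context; struct rcu_head rcu; };

    /* Synchronous stand-in: the kernel would wait out a grace period. */
    static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
    {
        func(head);
    }

    static void fence_free(struct rcu_head *rcu)
    {
        struct fence *f = container_of(rcu, struct fence, rcu);

        printf("freeing fence on context %d\n", f->context);
    }

    int main(void)
    {
        struct fence f = { .context = 42 };

        call_rcu(&f.rcu, fence_free);     /* what ->release now does */
        return 0;
    }
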
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d20c2a8..fa6a27b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -26,6 +26,7 @@
  *          Jerome Glisse
  */
 #include <linux/ktime.h>
+#include <linux/pagemap.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -83,24 +84,32 @@
 		return r;
 	}
 	*obj = &robj->gem_base;
-	robj->pid = task_pid_nr(current);
-
-	mutex_lock(&adev->gem.mutex);
-	list_add_tail(&robj->list, &adev->gem.objects);
-	mutex_unlock(&adev->gem.mutex);
 
 	return 0;
 }
 
-int amdgpu_gem_init(struct amdgpu_device *adev)
+void amdgpu_gem_force_release(struct amdgpu_device *adev)
 {
-	INIT_LIST_HEAD(&adev->gem.objects);
-	return 0;
-}
+	struct drm_device *ddev = adev->ddev;
+	struct drm_file *file;
 
-void amdgpu_gem_fini(struct amdgpu_device *adev)
-{
-	amdgpu_bo_force_delete(adev);
+	mutex_lock(&ddev->struct_mutex);
+
+	list_for_each_entry(file, &ddev->filelist, lhead) {
+		struct drm_gem_object *gobj;
+		int handle;
+
+		WARN_ONCE(1, "Still active user space clients!\n");
+		spin_lock(&file->table_lock);
+		idr_for_each_entry(&file->object_idr, gobj, handle) {
+			WARN_ONCE(1, "And also active allocations!\n");
+			drm_gem_object_unreference(gobj);
+		}
+		idr_destroy(&file->object_idr);
+		spin_unlock(&file->table_lock);
+	}
+
+	mutex_unlock(&ddev->struct_mutex);
 }
 
 /*
@@ -132,25 +141,40 @@
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
 			     struct drm_file *file_priv)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = bo->adev;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
+
+	struct amdgpu_bo_list_entry vm_pd;
+	struct list_head list, duplicates;
+	struct ttm_validate_buffer tv;
+	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	r = amdgpu_bo_reserve(rbo, true);
+
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+
+	tv.bo = &bo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
 	}
-	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	bo_va = amdgpu_vm_bo_find(vm, bo);
 	if (bo_va) {
 		if (--bo_va->ref_count == 0) {
 			amdgpu_vm_bo_rmv(adev, bo_va);
 		}
 	}
-	amdgpu_bo_unreserve(rbo);
+	ttm_eu_backoff_reservation(&ticket, &list);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -235,12 +259,10 @@
 	    AMDGPU_GEM_USERPTR_REGISTER))
 		return -EINVAL;
 
-	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
-	     !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
+	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
+	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
 
-		/* if we want to write to it we must require anonymous
-		   memory and install a MMU notifier */
+		/* if we want to write to it we must install a MMU notifier */
 		return -EACCES;
 	}
 
@@ -252,6 +274,8 @@
 		goto handle_lockup;
 
 	bo = gem_to_amdgpu_bo(gobj);
+	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
 	if (r)
 		goto release_object;
@@ -264,18 +288,23 @@
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
 		down_read(&current->mm->mmap_sem);
+
+		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
+						 bo->tbo.ttm->pages);
+		if (r)
+			goto unlock_mmap_sem;
+
 		r = amdgpu_bo_reserve(bo, true);
-		if (r) {
-			up_read(&current->mm->mmap_sem);
-			goto release_object;
-		}
+		if (r)
+			goto free_pages;
 
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 		amdgpu_bo_unreserve(bo);
-		up_read(&current->mm->mmap_sem);
 		if (r)
-			goto release_object;
+			goto free_pages;
+
+		up_read(&current->mm->mmap_sem);
 	}
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
@@ -287,6 +316,12 @@
 	args->handle = handle;
 	return 0;
 
+free_pages:
+	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
+
+unlock_mmap_sem:
+	up_read(&current->mm->mmap_sem);
+
 release_object:
 	drm_gem_object_unreference_unlocked(gobj);
 
@@ -308,7 +343,7 @@
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
+	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
 	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
 		drm_gem_object_unreference_unlocked(gobj);
 		return -EPERM;
@@ -559,11 +594,10 @@
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
-	if (args->operation == AMDGPU_VA_OP_MAP) {
-		tv_pd.bo = &fpriv->vm.page_directory->tbo;
-		tv_pd.shared = true;
-		list_add(&tv_pd.head, &list);
-	}
+	tv_pd.bo = &fpriv->vm.page_directory->tbo;
+	tv_pd.shared = true;
+	list_add(&tv_pd.head, &list);
+
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		drm_gem_object_unreference_unlocked(gobj);
@@ -629,7 +663,7 @@
 
 		info.bo_size = robj->gem_base.size;
 		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
-		info.domains = robj->initial_domain;
+		info.domains = robj->prefered_domains;
 		info.domain_flags = robj->flags;
 		amdgpu_bo_unreserve(robj);
 		if (copy_to_user(out, &info, sizeof(info)))
@@ -637,14 +671,18 @@
 		break;
 	}
 	case AMDGPU_GEM_OP_SET_PLACEMENT:
-		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
+		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
 			r = -EPERM;
 			amdgpu_bo_unreserve(robj);
 			break;
 		}
-		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
-						      AMDGPU_GEM_DOMAIN_GTT |
-						      AMDGPU_GEM_DOMAIN_CPU);
+		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
+							AMDGPU_GEM_DOMAIN_GTT |
+							AMDGPU_GEM_DOMAIN_CPU);
+		robj->allowed_domains = robj->prefered_domains;
+		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+
 		amdgpu_bo_unreserve(robj);
 		break;
 	default:
@@ -689,38 +727,73 @@
 }
 
 #if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *gobj = ptr;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+	struct seq_file *m = data;
+
+	unsigned domain;
+	const char *placement;
+	unsigned pin_count;
+
+	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	switch (domain) {
+	case AMDGPU_GEM_DOMAIN_VRAM:
+		placement = "VRAM";
+		break;
+	case AMDGPU_GEM_DOMAIN_GTT:
+		placement = " GTT";
+		break;
+	case AMDGPU_GEM_DOMAIN_CPU:
+	default:
+		placement = " CPU";
+		break;
+	}
+	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
+		   id, amdgpu_bo_size(bo), placement,
+		   amdgpu_bo_gpu_offset(bo));
+
+	pin_count = ACCESS_ONCE(bo->pin_count);
+	if (pin_count)
+		seq_printf(m, " pin count %d", pin_count);
+	seq_printf(m, "\n");
+
+	return 0;
+}
+
 static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = dev->dev_private;
-	struct amdgpu_bo *rbo;
-	unsigned i = 0;
+	struct drm_file *file;
+	int r;
 
-	mutex_lock(&adev->gem.mutex);
-	list_for_each_entry(rbo, &adev->gem.objects, list) {
-		unsigned domain;
-		const char *placement;
+	r = mutex_lock_interruptible(&dev->struct_mutex);
+	if (r)
+		return r;
 
-		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
-		switch (domain) {
-		case AMDGPU_GEM_DOMAIN_VRAM:
-			placement = "VRAM";
-			break;
-		case AMDGPU_GEM_DOMAIN_GTT:
-			placement = " GTT";
-			break;
-		case AMDGPU_GEM_DOMAIN_CPU:
-		default:
-			placement = " CPU";
-			break;
-		}
-		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
-			   i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
-			   placement, (unsigned long)rbo->pid);
-		i++;
+	list_for_each_entry(file, &dev->filelist, lhead) {
+		struct task_struct *task;
+
+		/*
+		 * Although we have a valid reference on file->pid, that does
+		 * not guarantee that the task_struct who called get_pid() is
+		 * still alive (e.g. get_pid(current) => fork() => exit()).
+		 * Therefore, we need to protect this ->comm access using RCU.
+		 */
+		rcu_read_lock();
+		task = pid_task(file->pid, PIDTYPE_PID);
+		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+			   task ? task->comm : "<unknown>");
+		rcu_read_unlock();
+
+		spin_lock(&file->table_lock);
+		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
+		spin_unlock(&file->table_lock);
 	}
-	mutex_unlock(&adev->gem.mutex);
+
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
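
The debugfs rework above drops the driver-global BO list in favor of walking each DRM file's handle idr with a callback. The iteration contract is the usual idr_for_each() one — visit every allocated (id, pointer) pair, stop on the first nonzero return — sketched here with a plain array standing in for the idr:

    #include <stdio.h>

    /* Minimal stand-in for idr_for_each(): visit every allocated slot,
     * stop on the first nonzero return from the callback. */
    static int idr_walk(void **table, int n,
                        int (*fn)(int id, void *ptr, void *data), void *data)
    {
        int id, ret;

        for (id = 0; id < n; id++) {
            if (!table[id])
                continue;
            ret = fn(id, table[id], data);
            if (ret)
                return ret;
        }
        return 0;
    }

    static int print_bo(int id, void *ptr, void *data)
    {
        (void)data;
        printf("\t0x%08x: %s\n", id, (const char *)ptr);
        return 0;
    }

    int main(void)
    {
        void *handles[4] = { "bo-A", NULL, "bo-C", NULL };

        return idr_walk(handles, 4, print_bo, NULL);
    }
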
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 9e25eda..8443cea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -55,10 +55,9 @@
  * suballocator.
  * Returns 0 on success, error on failure.
  */
-int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
+int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib)
 {
-	struct amdgpu_device *adev = ring->adev;
 	int r;
 
 	if (size) {
@@ -75,10 +74,8 @@
 			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 	}
 
-	amdgpu_sync_create(&ib->sync);
-
-	ib->ring = ring;
 	ib->vm = vm;
+	ib->vm_id = 0;
 
 	return 0;
 }
@@ -88,15 +85,13 @@
  *
  * @adev: amdgpu_device pointer
  * @ib: IB object to free
+ * @f: the fence the SA bo needs to wait on for the IB allocation
  *
  * Free an IB (all asics).
  */
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f)
 {
-	amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
-	amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
-	if (ib->fence)
-		fence_put(&ib->fence->base);
+	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
 }
 
 /**
@@ -105,7 +100,7 @@
  * @ring: ring the IBs are scheduled on
  * @num_ibs: number of IBs to schedule
  * @ibs: IB objects to schedule
- * @owner: owner for creating the fences
+ * @f: fence created during this submission
  *
  * Schedule an IB on the associated ring (all asics).
  * Returns 0 on success, error on failure.
@@ -120,20 +115,21 @@
  * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
  * to SI there was just a DE IB.
  */
-int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
-		       struct amdgpu_ib *ibs, void *owner)
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
+		       struct fence **f)
 {
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];
-	struct amdgpu_ring *ring;
 	struct amdgpu_ctx *ctx, *old_ctx;
 	struct amdgpu_vm *vm;
+	struct fence *hwf;
 	unsigned i;
 	int r = 0;
 
 	if (num_ibs == 0)
 		return -EINVAL;
 
-	ring = ibs->ring;
 	ctx = ibs->ctx;
 	vm = ibs->vm;
 
@@ -141,42 +137,24 @@
 		dev_err(adev->dev, "couldn't schedule ib\n");
 		return -EINVAL;
 	}
-	r = amdgpu_sync_wait(&ibs->sync);
-	if (r) {
-		dev_err(adev->dev, "IB sync failed (%d).\n", r);
-		return r;
+
+	if (vm && !ibs->vm_id) {
+		dev_err(adev->dev, "VM IB without ID\n");
+		return -EINVAL;
 	}
-	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
+
+	r = amdgpu_ring_alloc(ring, 256 * num_ibs);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
 	}
 
 	if (vm) {
-		/* grab a vm id if necessary */
-		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
-		if (r) {
-			amdgpu_ring_unlock_undo(ring);
-			return r;
-		}
-	}
-
-	r = amdgpu_sync_rings(&ibs->sync, ring);
-	if (r) {
-		amdgpu_ring_unlock_undo(ring);
-		dev_err(adev->dev, "failed to sync rings (%d)\n", r);
-		return r;
-	}
-
-	if (vm) {
 		/* do context switch */
-		amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
-
-		if (ring->funcs->emit_gds_switch)
-			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
-						    ib->gds_base, ib->gds_size,
-						    ib->gws_base, ib->gws_size,
-						    ib->oa_base, ib->oa_size);
+		amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
+				ib->gds_base, ib->gds_size,
+				ib->gws_base, ib->gws_size,
+				ib->oa_base, ib->oa_size);
 
 		if (ring->funcs->emit_hdp_flush)
 			amdgpu_ring_emit_hdp_flush(ring);
@@ -186,27 +164,32 @@
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
 
-		if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
+		if (ib->ctx != ctx || ib->vm != vm) {
 			ring->current_ctx = old_ctx;
-			amdgpu_ring_unlock_undo(ring);
+			if (ib->vm_id)
+				amdgpu_vm_reset_id(adev, ib->vm_id);
+			amdgpu_ring_undo(ring);
 			return -EINVAL;
 		}
 		amdgpu_ring_emit_ib(ring, ib);
 		ring->current_ctx = ctx;
 	}
 
-	r = amdgpu_fence_emit(ring, owner, &ib->fence);
+	if (vm) {
+		if (ring->funcs->emit_hdp_invalidate)
+			amdgpu_ring_emit_hdp_invalidate(ring);
+	}
+
+	r = amdgpu_fence_emit(ring, &hwf);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		ring->current_ctx = old_ctx;
-		amdgpu_ring_unlock_undo(ring);
+		if (ib->vm_id)
+			amdgpu_vm_reset_id(adev, ib->vm_id);
+		amdgpu_ring_undo(ring);
 		return r;
 	}
 
-	if (!amdgpu_enable_scheduler && ib->ctx)
-		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
-						    &ib->fence->base);
-
 	/* wrap the last IB with fence */
 	if (ib->user) {
 		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
@@ -215,10 +198,10 @@
 				       AMDGPU_FENCE_FLAG_64BIT);
 	}
 
-	if (ib->vm)
-		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
+	if (f)
+		*f = fence_get(hwf);
 
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
new file mode 100644
index 0000000..9c9b19e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+		     struct amdgpu_job **job)
+{
+	size_t size = sizeof(struct amdgpu_job);
+
+	if (num_ibs == 0)
+		return -EINVAL;
+
+	size += sizeof(struct amdgpu_ib) * num_ibs;
+
+	*job = kzalloc(size, GFP_KERNEL);
+	if (!*job)
+		return -ENOMEM;
+
+	(*job)->adev = adev;
+	(*job)->ibs = (void *)&(*job)[1];
+	(*job)->num_ibs = num_ibs;
+
+	amdgpu_sync_create(&(*job)->sync);
+
+	return 0;
+}
+
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+			     struct amdgpu_job **job)
+{
+	int r;
+
+	r = amdgpu_job_alloc(adev, 1, job);
+	if (r)
+		return r;
+
+	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+	if (r)
+		kfree(*job);
+
+	return r;
+}
+
+void amdgpu_job_free(struct amdgpu_job *job)
+{
+	unsigned i;
+	struct fence *f;
+	/* use sched fence if available */
+	f = job->base.s_fence ? &job->base.s_fence->base : job->fence;
+
+	for (i = 0; i < job->num_ibs; ++i)
+		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
+	fence_put(job->fence);
+
+	amdgpu_bo_unref(&job->uf.bo);
+	amdgpu_sync_free(&job->sync);
+	kfree(job);
+}
+
+int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+		      struct amd_sched_entity *entity, void *owner,
+		      struct fence **f)
+{
+	job->ring = ring;
+	job->base.sched = &ring->sched;
+	job->base.s_entity = entity;
+	job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+	if (!job->base.s_fence)
+		return -ENOMEM;
+
+	*f = fence_get(&job->base.s_fence->base);
+
+	job->owner = owner;
+	amd_sched_entity_push_job(&job->base);
+
+	return 0;
+}
+
+static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+{
+	struct amdgpu_job *job = to_amdgpu_job(sched_job);
+	struct amdgpu_vm *vm = job->ibs->vm;
+
+	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+
+	if (fence == NULL && vm && !job->ibs->vm_id) {
+		struct amdgpu_ring *ring = job->ring;
+		unsigned i, vm_id;
+		uint64_t vm_pd_addr;
+		int r;
+
+		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
+				      &job->base.s_fence->base,
+				      &vm_id, &vm_pd_addr);
+		if (r)
+			DRM_ERROR("Error getting VM ID (%d)\n", r);
+		else {
+			for (i = 0; i < job->num_ibs; ++i) {
+				job->ibs[i].vm_id = vm_id;
+				job->ibs[i].vm_pd_addr = vm_pd_addr;
+			}
+		}
+
+		fence = amdgpu_sync_get_fence(&job->sync);
+	}
+
+	return fence;
+}
+
+static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+{
+	struct fence *fence = NULL;
+	struct amdgpu_job *job;
+	int r;
+
+	if (!sched_job) {
+		DRM_ERROR("job is null\n");
+		return NULL;
+	}
+	job = to_amdgpu_job(sched_job);
+
+	r = amdgpu_sync_wait(&job->sync);
+	if (r) {
+		DRM_ERROR("failed to sync wait (%d)\n", r);
+		return NULL;
+	}
+
+	trace_amdgpu_sched_run_job(job);
+	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
+			       job->sync.last_vm_update, &fence);
+	if (r) {
+		DRM_ERROR("Error scheduling IBs (%d)\n", r);
+		goto err;
+	}
+
+err:
+	job->fence = fence;
+	amdgpu_job_free(job);
+	return fence;
+}
+
+struct amd_sched_backend_ops amdgpu_sched_ops = {
+	.dependency = amdgpu_job_dependency,
+	.run_job = amdgpu_job_run,
+};
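
amdgpu_job_alloc() above uses the common trailing-array idiom: the job header and its IB array come from a single zeroed allocation, with (*job)->ibs pointed just past the header, so one kfree() releases everything. A compilable model, where struct ib and struct job are simplified stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    struct ib  { unsigned vm_id; };
    struct job { unsigned num_ibs; struct ib *ibs; };

    /* Header plus IB array in one zeroed block, as the kzalloc() above. */
    static struct job *job_alloc(unsigned num_ibs)
    {
        struct job *job = calloc(1, sizeof(*job) + sizeof(struct ib) * num_ibs);

        if (!job)
            return NULL;
        job->ibs = (struct ib *)&job[1];  /* array starts right after header */
        job->num_ibs = num_ibs;
        return job;
    }

    int main(void)
    {
        struct job *job = job_alloc(4);

        if (!job)
            return 1;
        printf("ibs[0] sits %td bytes past the job header\n",
               (char *)job->ibs - (char *)job);
        free(job);                        /* one free() releases both */
        return 0;
    }
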
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e23843f..7805a87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -447,8 +447,7 @@
 			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
 		}
 		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
-		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
-					adev->gfx.config.max_shader_engines;
+		dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
 		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
 		dev_info._pad = 0;
 		dev_info.ids_flags = 0;
@@ -727,6 +726,12 @@
 
 	/* Get associated drm_crtc: */
+	if (!adev->mode_info.crtcs[pipe]) {
+		/* This can occur on driver load if some component fails to
+		 * initialize completely and the driver is then unloaded */
+		DRM_ERROR("Uninitialized crtc %d\n", pipe);
+		return -EINVAL;
+	}
 	crtc = &adev->mode_info.crtcs[pipe]->base;
 
 	/* Helper routine in DRM core does all the work: */
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d4e2780..d7ec9bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,8 +48,7 @@
 	/* protected by adev->mn_lock */
 	struct hlist_node	node;
 
-	/* objects protected by lock */
-	struct mutex		lock;
+	/* objects protected by mm->mmap_sem */
 	struct rb_root		objects;
 };
 
@@ -73,21 +72,19 @@
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->mm->mmap_sem);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
-
-		interval_tree_remove(&node->it, &rmn->objects);
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
 			bo->mn = NULL;
 			list_del_init(&bo->mn_list);
 		}
 		kfree(node);
 	}
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->mm->mmap_sem);
 	mutex_unlock(&adev->mn_lock);
-	mmu_notifier_unregister(&rmn->mn, rmn->mm);
+	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
 }
 
@@ -129,8 +126,6 @@
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	mutex_lock(&rmn->lock);
-
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
@@ -165,8 +160,6 @@
 			amdgpu_bo_unreserve(bo);
 		}
 	}
-
-	mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
@@ -187,8 +180,8 @@
 	struct amdgpu_mn *rmn;
 	int r;
 
-	down_write(&mm->mmap_sem);
 	mutex_lock(&adev->mn_lock);
+	down_write(&mm->mmap_sem);
 
 	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
 		if (rmn->mm == mm)
@@ -203,7 +196,6 @@
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
-	mutex_init(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -213,14 +205,14 @@
 	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
 
 release_locks:
-	mutex_unlock(&adev->mn_lock);
 	up_write(&mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 
 	return rmn;
 
 free_rmn:
-	mutex_unlock(&adev->mn_lock);
 	up_write(&mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 	kfree(rmn);
 
 	return ERR_PTR(r);
@@ -250,7 +242,7 @@
 
 	INIT_LIST_HEAD(&bos);
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->mm->mmap_sem);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -264,7 +256,7 @@
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			mutex_unlock(&rmn->lock);
+			up_write(&rmn->mm->mmap_sem);
 			return -ENOMEM;
 		}
 	}
@@ -279,7 +271,7 @@
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->mm->mmap_sem);
 
 	return 0;
 }
@@ -298,13 +290,15 @@
 	struct list_head *head;
 
 	mutex_lock(&adev->mn_lock);
+
 	rmn = bo->mn;
 	if (rmn == NULL) {
 		mutex_unlock(&adev->mn_lock);
 		return;
 	}
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->mm->mmap_sem);
+
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
 
@@ -318,6 +312,6 @@
 		kfree(node);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->mm->mmap_sem);
 	mutex_unlock(&adev->mn_lock);
 }
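Note the ordering discipline the hunks above establish: adev->mn_lock is always acquired before mmap_sem and released after it, in registration and teardown alike. A toy user-space model of the same two-lock hierarchy, using pthreads purely for illustration:

    #include <pthread.h>

    static pthread_mutex_t mn_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER; /* stand-in */

    /* Every path takes mn_lock first, then mmap_sem, and releases in
     * reverse order; a fixed acyclic lock order cannot deadlock. */
    static void with_both_locks(void (*body)(void *), void *arg)
    {
            pthread_mutex_lock(&mn_lock);
            pthread_mutex_lock(&mmap_sem);
            body(arg);
            pthread_mutex_unlock(&mmap_sem);
            pthread_mutex_unlock(&mn_lock);
    }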
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index fdc1be8..8d432e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -390,7 +390,6 @@
 	struct drm_display_mode native_mode;
 	u32 pll_id;
 	/* page flipping */
-	struct workqueue_struct *pflip_queue;
 	struct amdgpu_flip_work *pflip_works;
 	enum amdgpu_flip_status pflip_status;
 	int deferred_flip_completion;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b8fbbd7..151a2d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -97,9 +97,6 @@
 
 	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
 
-	mutex_lock(&bo->adev->gem.mutex);
-	list_del_init(&bo->list);
-	mutex_unlock(&bo->adev->gem.mutex);
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	kfree(bo->metadata);
@@ -254,12 +251,15 @@
 	bo->adev = adev;
 	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->va);
-	bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
-				       AMDGPU_GEM_DOMAIN_GTT |
-				       AMDGPU_GEM_DOMAIN_CPU |
-				       AMDGPU_GEM_DOMAIN_GDS |
-				       AMDGPU_GEM_DOMAIN_GWS |
-				       AMDGPU_GEM_DOMAIN_OA);
+	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+					 AMDGPU_GEM_DOMAIN_GTT |
+					 AMDGPU_GEM_DOMAIN_CPU |
+					 AMDGPU_GEM_DOMAIN_GDS |
+					 AMDGPU_GEM_DOMAIN_GWS |
+					 AMDGPU_GEM_DOMAIN_OA);
+	bo->allowed_domains = bo->prefered_domains;
+	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
 	bo->flags = flags;
 
@@ -308,7 +308,7 @@
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
 	bool is_iomem;
-	int r;
+	long r;
 
 	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
 		return -EPERM;
@@ -319,14 +319,20 @@
 		}
 		return 0;
 	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
-	if (r) {
+
+	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+						MAX_SCHEDULE_TIMEOUT);
+	if (r < 0)
 		return r;
-	}
+
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r)
+		return r;
+
 	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
-	if (ptr) {
+	if (ptr)
 		*ptr = bo->kptr;
-	}
+
 	return 0;
 }
 
@@ -367,7 +373,7 @@
 	int r, i;
 	unsigned fpfn, lpfn;
 
-	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 		return -EPERM;
 
 	if (WARN_ON_ONCE(min_offset > max_offset))
@@ -470,26 +476,6 @@
 	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
 }
 
-void amdgpu_bo_force_delete(struct amdgpu_device *adev)
-{
-	struct amdgpu_bo *bo, *n;
-
-	if (list_empty(&adev->gem.objects)) {
-		return;
-	}
-	dev_err(adev->dev, "Userspace still has active objects !\n");
-	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
-		dev_err(adev->dev, "%p %p %lu %lu force free\n",
-			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
-			*((unsigned long *)&bo->gem_base.refcount));
-		mutex_lock(&bo->adev->gem.mutex);
-		list_del_init(&bo->list);
-		mutex_unlock(&bo->adev->gem.mutex);
-		/* this should unref the ttm bo */
-		drm_gem_object_unreference_unlocked(&bo->gem_base);
-	}
-}
-
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
 	/* Add an MTRR for the VRAM */
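amdgpu_bo_kmap() above now waits on the buffer's reservation object before creating the CPU mapping, so the mapping is only handed out once pending GPU work on the buffer has finished. Reduced to a hedged sketch, with wait_for_idle() standing in for reservation_object_wait_timeout_rcu():

    struct bo;                                 /* opaque buffer object */

    /* stand-ins: <0 on error, otherwise the object is idle */
    long wait_for_idle(struct bo *bo);
    int  map_pages(struct bo *bo, void **ptr);

    /* Wait for outstanding GPU work, then map for the CPU. */
    static int kmap_after_idle(struct bo *bo, void **ptr)
    {
            long r = wait_for_idle(bo);

            if (r < 0)
                    return (int)r;
            return map_pages(bo, ptr);
    }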
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5107fb2..acc0801 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -149,7 +149,6 @@
 			     u64 *gpu_addr);
 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
-void amdgpu_bo_force_delete(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
 void amdgpu_bo_fini(struct amdgpu_device *adev);
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 95a4a25..ff9597c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -123,7 +123,9 @@
 		level = amdgpu_dpm_get_performance_level(adev);
 		return snprintf(buf, PAGE_SIZE, "%s\n",
 				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
-				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
+				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+				(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+				(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
 	} else {
 		enum amdgpu_dpm_forced_level level;
 
@@ -155,6 +157,8 @@
 		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
 	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
 		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
+		level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
 	} else {
 		count = -EINVAL;
 		goto fail;
@@ -180,10 +184,293 @@
 	return count;
 }
 
+static ssize_t amdgpu_get_pp_num_states(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	struct pp_states_info data;
+	int i, buf_len;
+
+	if (!adev->pp_enabled)
+		return snprintf(buf, PAGE_SIZE, "\n");
+
+	amdgpu_dpm_get_pp_num_states(adev, &data);
+
+	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
+	for (i = 0; i < data.nums; i++)
+		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
+				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
+				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
+				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
+				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
+
+	return buf_len;
+}
+
+static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	struct pp_states_info data;
+	enum amd_pm_state_type pm = 0;
+	int i = 0;
+
+	if (adev->pp_enabled) {
+		pm = amdgpu_dpm_get_current_power_state(adev);
+		amdgpu_dpm_get_pp_num_states(adev, &data);
+
+		for (i = 0; i < data.nums; i++) {
+			if (pm == data.states[i])
+				break;
+		}
+
+		if (i == data.nums)
+			i = -EINVAL;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", i);
+}
+
+static ssize_t amdgpu_get_pp_force_state(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	struct pp_states_info data;
+	enum amd_pm_state_type pm = 0;
+	int i;
+
+	if (adev->pp_force_state_enabled && adev->pp_enabled) {
+		pm = amdgpu_dpm_get_current_power_state(adev);
+		amdgpu_dpm_get_pp_num_states(adev, &data);
+
+		for (i = 0; i < data.nums; i++) {
+			if (pm == data.states[i])
+				break;
+		}
+
+		if (i == data.nums)
+			i = -EINVAL;
+
+		return snprintf(buf, PAGE_SIZE, "%d\n", i);
+	}
+
+	return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t amdgpu_set_pp_force_state(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	enum amd_pm_state_type state = 0;
+	long idx;
+	int ret;
+
+	if (strlen(buf) == 1)
+		adev->pp_force_state_enabled = false;
+	else {
+		ret = kstrtol(buf, 0, &idx);
+
+		if (ret) {
+			count = -EINVAL;
+			goto fail;
+		}
+
+		if (adev->pp_enabled) {
+			struct pp_states_info data;
+
+			amdgpu_dpm_get_pp_num_states(adev, &data);
+
+			if (idx < 0 || idx >= data.nums) {
+				count = -EINVAL;
+				goto fail;
+			}
+			state = data.states[idx];
+			/* only set user selected power states */
+			if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+				state != POWER_STATE_TYPE_DEFAULT) {
+				amdgpu_dpm_dispatch_task(adev,
+						AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+				adev->pp_force_state_enabled = true;
+			}
+		}
+	}
+fail:
+	return count;
+}
+
+static ssize_t amdgpu_get_pp_table(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	char *table = NULL;
+	int size, i;
+
+	if (adev->pp_enabled)
+		size = amdgpu_dpm_get_pp_table(adev, &table);
+	else
+		return 0;
+
+	/* two hex characters per table byte, plus a trailing newline */
+	if (size > (PAGE_SIZE - 2) / 2)
+		size = (PAGE_SIZE - 2) / 2;
+
+	for (i = 0; i < size; i++)
+		sprintf(buf + i * 2, "%02x", table[i]);
+	sprintf(buf + i * 2, "\n");
+
+	return i * 2 + 1;
+}
+
+static ssize_t amdgpu_set_pp_table(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+
+	if (adev->pp_enabled)
+		amdgpu_dpm_set_pp_table(adev, buf, count);
+
+	return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size = 0;
+
+	if (adev->pp_enabled)
+		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+
+	return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	int ret;
+	long level;
+
+	ret = kstrtol(buf, 0, &level);
+
+	if (ret) {
+		count = -EINVAL;
+		goto fail;
+	}
+
+	if (adev->pp_enabled)
+		amdgpu_dpm_force_clock_level(adev, PP_SCLK, level);
+fail:
+	return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size = 0;
+
+	if (adev->pp_enabled)
+		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+
+	return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	int ret;
+	long level;
+
+	ret = kstrtol(buf, 0, &level);
+
+	if (ret) {
+		count = -EINVAL;
+		goto fail;
+	}
+
+	if (adev->pp_enabled)
+		amdgpu_dpm_force_clock_level(adev, PP_MCLK, level);
+fail:
+	return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	ssize_t size = 0;
+
+	if (adev->pp_enabled)
+		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+
+	return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	int ret;
+	long level;
+
+	ret = kstrtol(buf, 0, &level);
+
+	if (ret) {
+		count = -EINVAL;
+		goto fail;
+	}
+
+	if (adev->pp_enabled)
+		amdgpu_dpm_force_clock_level(adev, PP_PCIE, level);
+fail:
+	return count;
+}
+
 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
 		   amdgpu_get_dpm_forced_performance_level,
 		   amdgpu_set_dpm_forced_performance_level);
+static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
+static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
+static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
+		amdgpu_get_pp_force_state,
+		amdgpu_set_pp_force_state);
+static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
+		amdgpu_get_pp_table,
+		amdgpu_set_pp_table);
+static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
+		amdgpu_get_pp_dpm_sclk,
+		amdgpu_set_pp_dpm_sclk);
+static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
+		amdgpu_get_pp_dpm_mclk,
+		amdgpu_set_pp_dpm_mclk);
+static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
+		amdgpu_get_pp_dpm_pcie,
+		amdgpu_set_pp_dpm_pcie);
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 				      struct device_attribute *attr,
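The seven new attributes are plain sysfs text files, so they need no dedicated tooling. A minimal user-space reader, assuming the usual /sys/class/drm/card0/device path and that amdgpu_dpm_print_clock_levels emits one level per line:

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/class/drm/card0/device/pp_dpm_sclk", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);       /* one sclk level per line */
            fclose(f);
            return 0;
    }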
@@ -637,14 +924,12 @@
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
 	}
 
-	mutex_lock(&adev->ring_lock);
-
 	/* update whether vce is active */
 	ps->vce_active = adev->pm.dpm.vce_active;
 
 	ret = amdgpu_dpm_pre_set_power_state(adev);
 	if (ret)
-		goto done;
+		return;
 
 	/* update display watermarks based on new power state */
 	amdgpu_display_bandwidth_update(adev);
@@ -682,9 +967,6 @@
 			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
 		}
 	}
-
-done:
-	mutex_unlock(&adev->ring_lock);
 }
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
@@ -785,6 +1067,44 @@
 		DRM_ERROR("failed to create device file for dpm state\n");
 		return ret;
 	}
+
+	if (adev->pp_enabled) {
+		ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_num_states\n");
+			return ret;
+		}
+		ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_cur_state\n");
+			return ret;
+		}
+		ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_force_state\n");
+			return ret;
+		}
+		ret = device_create_file(adev->dev, &dev_attr_pp_table);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_table\n");
+			return ret;
+		}
+		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+			return ret;
+		}
+		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+			return ret;
+		}
+		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+			return ret;
+		}
+	}
 	ret = amdgpu_debugfs_pm_init(adev);
 	if (ret) {
 		DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -802,6 +1122,15 @@
 		hwmon_device_unregister(adev->pm.int_hwmon_dev);
 	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
 	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
+	if (adev->pp_enabled) {
+		device_remove_file(adev->dev, &dev_attr_pp_num_states);
+		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
+		device_remove_file(adev->dev, &dev_attr_pp_force_state);
+		device_remove_file(adev->dev, &dev_attr_pp_table);
+		device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+		device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+	}
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -817,13 +1146,11 @@
 		int i = 0;
 
 		amdgpu_display_bandwidth_update(adev);
-		mutex_lock(&adev->ring_lock);
-			for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-				struct amdgpu_ring *ring = adev->rings[i];
-				if (ring && ring->ready)
-					amdgpu_fence_wait_empty(ring);
-			}
-		mutex_unlock(&adev->ring_lock);
+		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+			struct amdgpu_ring *ring = adev->rings[i];
+			if (ring && ring->ready)
+				amdgpu_fence_wait_empty(ring);
+		}
 
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
 	} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 59f735a..be6388f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -73,10 +73,6 @@
 	if (ret)
 		return ERR_PTR(ret);
 
-	mutex_lock(&adev->gem.mutex);
-	list_add_tail(&bo->list, &adev->gem.objects);
-	mutex_unlock(&adev->gem.mutex);
-
 	return &bo->gem_base;
 }
 
@@ -121,7 +117,7 @@
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 
-	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 		return ERR_PTR(-EPERM);
 
 	return drm_gem_prime_export(dev, gobj, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d1f234d..972eed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -49,28 +49,6 @@
 static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 
 /**
- * amdgpu_ring_free_size - update the free size
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring structure holding ring information
- *
- * Update the free dw slots in the ring buffer (all asics).
- */
-void amdgpu_ring_free_size(struct amdgpu_ring *ring)
-{
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
-
-	/* This works because ring_size is a power of 2 */
-	ring->ring_free_dw = rptr + (ring->ring_size / 4);
-	ring->ring_free_dw -= ring->wptr;
-	ring->ring_free_dw &= ring->ptr_mask;
-	if (!ring->ring_free_dw) {
-		/* this is an empty ring */
-		ring->ring_free_dw = ring->ring_size / 4;
-	}
-}
-
-/**
  * amdgpu_ring_alloc - allocate space on the ring buffer
  *
  * @adev: amdgpu_device pointer
@@ -82,53 +60,21 @@
  */
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
 {
-	int r;
-
-	/* make sure we aren't trying to allocate more space than there is on the ring */
-	if (ndw > (ring->ring_size / 4))
-		return -ENOMEM;
-	/* Align requested size with padding so unlock_commit can
-	 * pad safely */
+	/* Align requested size with padding so amdgpu_ring_commit()
+	 * can pad safely */
-	amdgpu_ring_free_size(ring);
 	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
-	while (ndw > (ring->ring_free_dw - 1)) {
-		amdgpu_ring_free_size(ring);
-		if (ndw < ring->ring_free_dw) {
-			break;
-		}
-		r = amdgpu_fence_wait_next(ring);
-		if (r)
-			return r;
-	}
+
+	/* Make sure we aren't trying to allocate more space
+	 * than the maximum for one submission
+	 */
+	if (WARN_ON_ONCE(ndw > ring->max_dw))
+		return -ENOMEM;
+
 	ring->count_dw = ndw;
 	ring->wptr_old = ring->wptr;
 	return 0;
 }
 
-/**
- * amdgpu_ring_lock - lock the ring and allocate space on it
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring structure holding ring information
- * @ndw: number of dwords to allocate in the ring buffer
- *
- * Lock the ring and allocate @ndw dwords in the ring buffer
- * (all asics).
- * Returns 0 on success, error on failure.
- */
-int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
-{
-	int r;
-
-	mutex_lock(ring->ring_lock);
-	r = amdgpu_ring_alloc(ring, ndw);
-	if (r) {
-		mutex_unlock(ring->ring_lock);
-		return r;
-	}
-	return 0;
-}
-
 /** amdgpu_ring_insert_nop - insert NOP packets
  *
  * @ring: amdgpu_ring structure holding ring information
@@ -144,6 +90,19 @@
 		amdgpu_ring_write(ring, ring->nop);
 }
 
+/**
+ * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: IB to add NOP packets to
+ *
+ * This is the generic pad_ib function for rings except SDMA.
+ */
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+	while (ib->length_dw & ring->align_mask)
+		ib->ptr[ib->length_dw++] = ring->nop;
+}
+
 /**
  * amdgpu_ring_commit - tell the GPU to execute the new
  * commands on the ring buffer
@@ -168,20 +127,6 @@
 }
 
 /**
- * amdgpu_ring_unlock_commit - tell the GPU to execute the new
- * commands on the ring buffer and unlock it
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Call amdgpu_ring_commit() then unlock the ring (all asics).
- */
-void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring)
-{
-	amdgpu_ring_commit(ring);
-	mutex_unlock(ring->ring_lock);
-}
-
-/**
  * amdgpu_ring_undo - reset the wptr
  *
  * @ring: amdgpu_ring structure holding ring information
@@ -194,19 +139,6 @@
 }
 
 /**
- * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Call amdgpu_ring_undo() then unlock the ring (all asics).
- */
-void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
-{
-	amdgpu_ring_undo(ring);
-	mutex_unlock(ring->ring_lock);
-}
-
-/**
  * amdgpu_ring_backup - Back up the content of a ring
  *
  * @ring: the ring we want to back up
@@ -218,43 +150,32 @@
 {
 	unsigned size, ptr, i;
 
-	/* just in case lock the ring */
-	mutex_lock(ring->ring_lock);
 	*data = NULL;
 
-	if (ring->ring_obj == NULL) {
-		mutex_unlock(ring->ring_lock);
+	if (ring->ring_obj == NULL)
 		return 0;
-	}
 
 	/* it doesn't make sense to save anything if all fences are signaled */
-	if (!amdgpu_fence_count_emitted(ring)) {
-		mutex_unlock(ring->ring_lock);
+	if (!amdgpu_fence_count_emitted(ring))
 		return 0;
-	}
 
 	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
 
 	size = ring->wptr + (ring->ring_size / 4);
 	size -= ptr;
 	size &= ring->ptr_mask;
-	if (size == 0) {
-		mutex_unlock(ring->ring_lock);
+	if (size == 0)
 		return 0;
-	}
 
 	/* and then save the content of the ring */
 	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
-	if (!*data) {
-		mutex_unlock(ring->ring_lock);
+	if (!*data)
 		return 0;
-	}
 	for (i = 0; i < size; ++i) {
 		(*data)[i] = ring->ring[ptr++];
 		ptr &= ring->ptr_mask;
 	}
 
-	mutex_unlock(ring->ring_lock);
 	return size;
 }
 
@@ -276,7 +197,7 @@
 		return 0;
 
 	/* restore the saved ring content */
-	r = amdgpu_ring_lock(ring, size);
+	r = amdgpu_ring_alloc(ring, size);
 	if (r)
 		return r;
 
@@ -284,7 +205,7 @@
 		amdgpu_ring_write(ring, data[i]);
 	}
 
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 	kfree(data);
 	return 0;
 }
@@ -315,7 +236,8 @@
 		ring->adev = adev;
 		ring->idx = adev->num_rings++;
 		adev->rings[ring->idx] = ring;
-		r = amdgpu_fence_driver_init_ring(ring);
+		r = amdgpu_fence_driver_init_ring(ring,
+			amdgpu_sched_hw_submission);
 		if (r)
 			return r;
 	}
@@ -352,7 +274,6 @@
 		return r;
 	}
 
-	ring->ring_lock = &adev->ring_lock;
 	/* Align ring size */
 	rb_bufsz = order_base_2(ring_size / 8);
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
@@ -389,7 +310,8 @@
 		}
 	}
 	ring->ptr_mask = (ring->ring_size / 4) - 1;
-	ring->ring_free_dw = ring->ring_size / 4;
+	ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
+				    amdgpu_sched_hw_submission);
 
 	if (amdgpu_debugfs_ring_init(adev, ring)) {
 		DRM_ERROR("Failed to register debugfs file for rings !\n");
@@ -410,15 +332,10 @@
 	int r;
 	struct amdgpu_bo *ring_obj;
 
-	if (ring->ring_lock == NULL)
-		return;
-
-	mutex_lock(ring->ring_lock);
 	ring_obj = ring->ring_obj;
 	ring->ready = false;
 	ring->ring = NULL;
 	ring->ring_obj = NULL;
-	mutex_unlock(ring->ring_lock);
 
 	amdgpu_wb_free(ring->adev, ring->fence_offs);
 	amdgpu_wb_free(ring->adev, ring->rptr_offs);
@@ -436,30 +353,6 @@
 	}
 }
 
-/**
- * amdgpu_ring_from_fence - get ring from fence
- *
- * @f: fence structure
- *
- * Extract the ring a fence belongs to. Handles both scheduler as
- * well as hardware fences.
- */
-struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f)
-{
-	struct amdgpu_fence *a_fence;
-	struct amd_sched_fence *s_fence;
-
-	s_fence = to_amd_sched_fence(f);
-	if (s_fence)
-		return container_of(s_fence->sched, struct amdgpu_ring, sched);
-
-	a_fence = to_amdgpu_fence(f);
-	if (a_fence)
-		return a_fence->ring;
-
-	return NULL;
-}
-
 /*
  * Debugfs info
  */
@@ -474,29 +367,18 @@
 	struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
 
 	uint32_t rptr, wptr, rptr_next;
-	unsigned count, i, j;
-
-	amdgpu_ring_free_size(ring);
-	count = (ring->ring_size / 4) - ring->ring_free_dw;
+	unsigned i;
 
 	wptr = amdgpu_ring_get_wptr(ring);
-	seq_printf(m, "wptr: 0x%08x [%5d]\n",
-		   wptr, wptr);
+	seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);
 
 	rptr = amdgpu_ring_get_rptr(ring);
-	seq_printf(m, "rptr: 0x%08x [%5d]\n",
-		   rptr, rptr);
-
 	rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
 
+	seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);
+
 	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
 		   ring->wptr, ring->wptr);
-	seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
-		   ring->last_semaphore_signal_addr);
-	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n",
-		   ring->last_semaphore_wait_addr);
-	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
-	seq_printf(m, "%u dwords in ring\n", count);
 
 	if (!ring->ready)
 		return 0;
@@ -505,11 +387,20 @@
 	 * packet that is the root issue
 	 */
 	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-	for (j = 0; j <= (count + 32); j++) {
+	while (i != rptr) {
+		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+		/* i != rptr inside this loop, so only rptr_next can match */
+		if (i == rptr_next)
+			seq_puts(m, " #");
+		seq_puts(m, "\n");
+		i = (i + 1) & ring->ptr_mask;
+	}
+		i = (i + 1) & ring->ptr_mask;
+	}
+	while (i != wptr) {
+		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+		if (i == rptr)
+			seq_puts(m, " *");
+		if (i == rptr_next)
 			seq_puts(m, " #");
 		seq_puts(m, "\n");
 		i = (i + 1) & ring->ptr_mask;
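With ring_free_dw and the per-ring mutex gone, safety rests on max_dw: the ring is partitioned so that amdgpu_sched_hw_submission outstanding submissions can never overwrite each other. The power-of-two mask arithmetic everything above relies on, shown in isolation:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint32_t ring_dw  = 1024;       /* power of two */
            const uint32_t ptr_mask = ring_dw - 1;
            uint32_t rptr = 1000, wptr = 40;      /* wptr already wrapped */

            /* dwords in flight; the mask keeps this valid across a wrap */
            uint32_t used = (wptr - rptr) & ptr_mask;

            printf("used=%u free=%u\n", used, ring_dw - used);
            assert(used == 64);
            return 0;
    }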
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index ca72a2e..8bf84ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -60,9 +60,8 @@
 	sa_manager->align = align;
 	sa_manager->hole = &sa_manager->olist;
 	INIT_LIST_HEAD(&sa_manager->olist);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
 		INIT_LIST_HEAD(&sa_manager->flist[i]);
-	}
 
 	r = amdgpu_bo_create(adev, size, align, true, domain,
 			     0, NULL, NULL, &sa_manager->bo);
@@ -228,11 +227,9 @@
 	unsigned soffset, eoffset, wasted;
 	int i;
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (!list_empty(&sa_manager->flist[i])) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
+		if (!list_empty(&sa_manager->flist[i]))
 			return true;
-		}
-	}
 
 	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
 	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
@@ -265,12 +262,11 @@
 	/* go over all fence list and try to find the closest sa_bo
 	 * of the current last
 	 */
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
 		struct amdgpu_sa_bo *sa_bo;
 
-		if (list_empty(&sa_manager->flist[i])) {
+		if (list_empty(&sa_manager->flist[i]))
 			continue;
-		}
 
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
@@ -299,7 +295,9 @@
 	}
 
 	if (best_bo) {
-		uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
+		uint32_t idx = best_bo->fence->context;
+
+		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
 		++tries[idx];
 		sa_manager->hole = best_bo->olist.prev;
 
@@ -315,14 +313,17 @@
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct fence *fences[AMDGPU_MAX_RINGS];
-	unsigned tries[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
 	unsigned count;
 	int i, r;
 	signed long t;
 
-	BUG_ON(align > sa_manager->align);
-	BUG_ON(size > sa_manager->size);
+	if (WARN_ON_ONCE(align > sa_manager->align))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(size > sa_manager->size))
+		return -EINVAL;
 
 	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
 	if ((*sa_bo) == NULL) {
@@ -335,7 +336,7 @@
 
 	spin_lock(&sa_manager->wq.lock);
 	do {
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
 			fences[i] = NULL;
 			tries[i] = 0;
 		}
@@ -352,7 +353,7 @@
 			/* see if we can skip over some allocations */
 		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
-		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
 			if (fences[i])
 				fences[count++] = fence_get(fences[i]);
 
@@ -394,8 +395,9 @@
 	spin_lock(&sa_manager->wq.lock);
 	if (fence && !fence_is_signaled(fence)) {
 		uint32_t idx;
+
 		(*sa_bo)->fence = fence_get(fence);
-		idx = amdgpu_ring_from_fence(fence)->idx;
+		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
 		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
 		amdgpu_sa_bo_remove_locked(*sa_bo);
@@ -407,25 +409,6 @@
 
 #if defined(CONFIG_DEBUG_FS)
 
-static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
-{
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
-	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
-
-	if (a_fence)
-		seq_printf(m, " protected by 0x%016llx on ring %d",
-			   a_fence->seq, a_fence->ring->idx);
-
-	if (s_fence) {
-		struct amdgpu_ring *ring;
-
-
-		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
-		seq_printf(m, " protected by 0x%016x on ring %d",
-			   s_fence->base.seqno, ring->idx);
-	}
-}
-
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 				  struct seq_file *m)
 {
@@ -442,8 +425,11 @@
 		}
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
+
 		if (i->fence)
-			amdgpu_sa_bo_dump_fence(i->fence, m);
+			seq_printf(m, " protected by 0x%08x on context %u",
+				   i->fence->seqno, i->fence->context);
+
 		seq_printf(m, "\n");
 	}
 	spin_unlock(&sa_manager->wq.lock);
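Dropping amdgpu_ring_from_fence() works because every fence already carries a context number, and contexts map onto fence lists with a simple modulo. That bucketing on its own:

    #include <stdio.h>

    #define NUM_FENCE_LISTS 32      /* mirrors AMDGPU_SA_NUM_FENCE_LISTS */

    struct fence { unsigned context; unsigned seqno; };

    /* Derive the fence-list index straight from the fence itself,
     * without needing to know which ring produced it. */
    static unsigned fence_bucket(const struct fence *f)
    {
            return f->context % NUM_FENCE_LISTS;
    }

    int main(void)
    {
            struct fence f = { .context = 70, .seqno = 1 };

            printf("context %u -> list %u\n", f.context, fence_bucket(&f));
            return 0;
    }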
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
deleted file mode 100644
index 438c052..0000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#include <linux/kthread.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <drm/drmP.h>
-#include "amdgpu.h"
-#include "amdgpu_trace.h"
-
-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
-{
-	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	return amdgpu_sync_get_fence(&job->ibs->sync);
-}
-
-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
-{
-	struct amdgpu_fence *fence = NULL;
-	struct amdgpu_job *job;
-	int r;
-
-	if (!sched_job) {
-		DRM_ERROR("job is null\n");
-		return NULL;
-	}
-	job = to_amdgpu_job(sched_job);
-	trace_amdgpu_sched_run_job(job);
-	r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
-	if (r) {
-		DRM_ERROR("Error scheduling IBs (%d)\n", r);
-		goto err;
-	}
-
-	fence = job->ibs[job->num_ibs - 1].fence;
-	fence_get(&fence->base);
-
-err:
-	if (job->free_job)
-		job->free_job(job);
-
-	kfree(job);
-	return fence ? &fence->base : NULL;
-}
-
-struct amd_sched_backend_ops amdgpu_sched_ops = {
-	.dependency = amdgpu_sched_dependency,
-	.run_job = amdgpu_sched_run_job,
-};
-
-int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
-					 struct amdgpu_ring *ring,
-					 struct amdgpu_ib *ibs,
-					 unsigned num_ibs,
-					 int (*free_job)(struct amdgpu_job *),
-					 void *owner,
-					 struct fence **f)
-{
-	int r = 0;
-	if (amdgpu_enable_scheduler) {
-		struct amdgpu_job *job =
-			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
-		if (!job)
-			return -ENOMEM;
-		job->base.sched = &ring->sched;
-		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
-		job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
-		if (!job->base.s_fence) {
-			kfree(job);
-			return -ENOMEM;
-		}
-		*f = fence_get(&job->base.s_fence->base);
-
-		job->adev = adev;
-		job->ibs = ibs;
-		job->num_ibs = num_ibs;
-		job->owner = owner;
-		job->free_job = free_job;
-		amd_sched_entity_push_job(&job->base);
-	} else {
-		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
-		if (r)
-			return r;
-		*f = fence_get(&ibs[num_ibs - 1].fence->base);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
deleted file mode 100644
index 1caaf20..0000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright 2011 Christian König.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors:
- *    Christian König <deathsimple@vodafone.de>
- */
-#include <drm/drmP.h>
-#include "amdgpu.h"
-#include "amdgpu_trace.h"
-
-int amdgpu_semaphore_create(struct amdgpu_device *adev,
-			    struct amdgpu_semaphore **semaphore)
-{
-	int r;
-
-	*semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL);
-	if (*semaphore == NULL) {
-		return -ENOMEM;
-	}
-	r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
-			     &(*semaphore)->sa_bo, 8, 8);
-	if (r) {
-		kfree(*semaphore);
-		*semaphore = NULL;
-		return r;
-	}
-	(*semaphore)->waiters = 0;
-	(*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo);
-
-	*((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
-
-	return 0;
-}
-
-bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
-				  struct amdgpu_semaphore *semaphore)
-{
-	trace_amdgpu_semaphore_signale(ring->idx, semaphore);
-
-	if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) {
-		--semaphore->waiters;
-
-		/* for debugging lockup only, used by sysfs debug files */
-		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
-		return true;
-	}
-	return false;
-}
-
-bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
-				struct amdgpu_semaphore *semaphore)
-{
-	trace_amdgpu_semaphore_wait(ring->idx, semaphore);
-
-	if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) {
-		++semaphore->waiters;
-
-		/* for debugging lockup only, used by sysfs debug files */
-		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
-		return true;
-	}
-	return false;
-}
-
-void amdgpu_semaphore_free(struct amdgpu_device *adev,
-			   struct amdgpu_semaphore **semaphore,
-			   struct fence *fence)
-{
-	if (semaphore == NULL || *semaphore == NULL) {
-		return;
-	}
-	if ((*semaphore)->waiters > 0) {
-		dev_err(adev->dev, "semaphore %p has more waiters than signalers,"
-			" hardware lockup imminent!\n", *semaphore);
-	}
-	amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence);
-	kfree(*semaphore);
-	*semaphore = NULL;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 181ce39..c48b4fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -37,6 +37,8 @@
 	struct fence		*fence;
 };
 
+static struct kmem_cache *amdgpu_sync_slab;
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -46,26 +48,22 @@
  */
 void amdgpu_sync_create(struct amdgpu_sync *sync)
 {
-	unsigned i;
-
-	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
-		sync->semaphores[i] = NULL;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		sync->sync_to[i] = NULL;
-
 	hash_init(sync->fences);
 	sync->last_vm_update = NULL;
 }
 
+/**
+ * amdgpu_sync_same_dev - test if a fence belongs to us
+ *
+ * @adev: amdgpu device to use for the test
+ * @f: fence to test
+ *
+ * Test if the fence was issued by us.
+ */
 static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 {
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
-	if (a_fence)
-		return a_fence->ring->adev == adev;
-
 	if (s_fence) {
 		struct amdgpu_ring *ring;
 
@@ -76,17 +74,31 @@
 	return false;
 }
 
-static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+/**
+ * amdgpu_sync_get_owner - extract the owner of a fence
+ *
+ * @f: fence to get the owner from
+ *
+ * Extract who originally created the fence.
+ */
+static void *amdgpu_sync_get_owner(struct fence *f)
 {
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
 	if (s_fence)
-		return s_fence->owner == owner;
-	if (a_fence)
-		return a_fence->owner == owner;
-	return false;
+		return s_fence->owner;
+
+	return AMDGPU_FENCE_OWNER_UNDEFINED;
 }
 
+/**
+ * amdgpu_sync_keep_later - Keep the later fence
+ *
+ * @keep: existing fence to test
+ * @fence: new fence
+ *
+ * Either keep the existing fence or the new one, depending on which is later.
+ */
 static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
 {
 	if (*keep && fence_is_later(*keep, fence))
@@ -107,59 +119,39 @@
 		      struct fence *f)
 {
 	struct amdgpu_sync_entry *e;
-	struct amdgpu_fence *fence;
 
 	if (!f)
 		return 0;
 
 	if (amdgpu_sync_same_dev(adev, f) &&
-	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
+	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
 		amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
-	fence = to_amdgpu_fence(f);
-	if (!fence || fence->ring->adev != adev) {
-		hash_for_each_possible(sync->fences, e, node, f->context) {
-			if (unlikely(e->fence->context != f->context))
-				continue;
+	hash_for_each_possible(sync->fences, e, node, f->context) {
+		if (unlikely(e->fence->context != f->context))
+			continue;
 
-			amdgpu_sync_keep_later(&e->fence, f);
-			return 0;
-		}
-
-		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
-		if (!e)
-			return -ENOMEM;
-
-		hash_add(sync->fences, &e->node, f->context);
-		e->fence = fence_get(f);
+		amdgpu_sync_keep_later(&e->fence, f);
 		return 0;
 	}
 
-	amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f);
+	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
 
+	hash_add(sync->fences, &e->node, f->context);
+	e->fence = fence_get(f);
 	return 0;
 }
 
-static void *amdgpu_sync_get_owner(struct fence *f)
-{
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
-	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
-
-	if (s_fence)
-		return s_fence->owner;
-	else if (a_fence)
-		return a_fence->owner;
-	return AMDGPU_FENCE_OWNER_UNDEFINED;
-}
-
 /**
- * amdgpu_sync_resv - use the semaphores to sync to a reservation object
+ * amdgpu_sync_resv - sync to a reservation object
  *
  * @sync: sync object to add fences from reservation object to
  * @resv: reservation object with embedded fence
  * @shared: true if we should only sync to the exclusive fence
  *
- * Sync to the fence using the semaphore objects
+ * Sync to the fence
  */
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
@@ -224,7 +216,7 @@
 		f = e->fence;
 
 		hash_del(&e->node);
-		kfree(e);
+		kmem_cache_free(amdgpu_sync_slab, e);
 
 		if (!fence_is_signaled(f))
 			return f;
@@ -247,109 +239,7 @@
 
 		hash_del(&e->node);
 		fence_put(e->fence);
-		kfree(e);
-	}
-
-	if (amdgpu_enable_semaphores)
-		return 0;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct fence *fence = sync->sync_to[i];
-		if (!fence)
-			continue;
-
-		r = fence_wait(fence, false);
-		if (r)
-			return r;
-	}
-
-	return 0;
-}
-
-/**
- * amdgpu_sync_rings - sync ring to all registered fences
- *
- * @sync: sync object to use
- * @ring: ring that needs sync
- *
- * Ensure that all registered fences are signaled before letting
- * the ring continue. The caller must hold the ring lock.
- */
-int amdgpu_sync_rings(struct amdgpu_sync *sync,
-		      struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	unsigned count = 0;
-	int i, r;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_ring *other = adev->rings[i];
-		struct amdgpu_semaphore *semaphore;
-		struct amdgpu_fence *fence;
-
-		if (!sync->sync_to[i])
-			continue;
-
-		fence = to_amdgpu_fence(sync->sync_to[i]);
-
-		/* check if we really need to sync */
-		if (!amdgpu_enable_scheduler &&
-		    !amdgpu_fence_need_sync(fence, ring))
-			continue;
-
-		/* prevent GPU deadlocks */
-		if (!other->ready) {
-			dev_err(adev->dev, "Syncing to a disabled ring!");
-			return -EINVAL;
-		}
-
-		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
-			r = fence_wait(sync->sync_to[i], true);
-			if (r)
-				return r;
-			continue;
-		}
-
-		if (count >= AMDGPU_NUM_SYNCS) {
-			/* not enough room, wait manually */
-			r = fence_wait(&fence->base, false);
-			if (r)
-				return r;
-			continue;
-		}
-		r = amdgpu_semaphore_create(adev, &semaphore);
-		if (r)
-			return r;
-
-		sync->semaphores[count++] = semaphore;
-
-		/* allocate enough space for sync command */
-		r = amdgpu_ring_alloc(other, 16);
-		if (r)
-			return r;
-
-		/* emit the signal semaphore */
-		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
-			/* signaling wasn't successful wait manually */
-			amdgpu_ring_undo(other);
-			r = fence_wait(&fence->base, false);
-			if (r)
-				return r;
-			continue;
-		}
-
-		/* we assume caller has already allocated space on waiters ring */
-		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
-			/* waiting wasn't successful wait manually */
-			amdgpu_ring_undo(other);
-			r = fence_wait(&fence->base, false);
-			if (r)
-				return r;
-			continue;
-		}
-
-		amdgpu_ring_commit(other);
-		amdgpu_fence_note_sync(fence, ring);
+		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
 	return 0;
@@ -358,15 +248,11 @@
 /**
  * amdgpu_sync_free - free the sync object
  *
- * @adev: amdgpu_device pointer
  * @sync: sync object to use
- * @fence: fence to use for the free
  *
- * Free the sync object by freeing all semaphores in it.
+ * Free the sync object.
  */
-void amdgpu_sync_free(struct amdgpu_device *adev,
-		      struct amdgpu_sync *sync,
-		      struct fence *fence)
+void amdgpu_sync_free(struct amdgpu_sync *sync)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
@@ -375,14 +261,34 @@
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 		hash_del(&e->node);
 		fence_put(e->fence);
-		kfree(e);
+		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
-	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
-		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		fence_put(sync->sync_to[i]);
-
 	fence_put(sync->last_vm_update);
 }
+
+/**
+ * amdgpu_sync_init - init sync object subsystem
+ *
+ * Allocate the slab allocator.
+ */
+int amdgpu_sync_init(void)
+{
+	amdgpu_sync_slab = kmem_cache_create(
+		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!amdgpu_sync_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * amdgpu_sync_fini - fini sync object subsystem
+ *
+ * Free the slab allocator.
+ */
+void amdgpu_sync_fini(void)
+{
+	kmem_cache_destroy(amdgpu_sync_slab);
+}
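Keying the hash on f->context suffices because fences within one context signal in order: remembering the later fence of a context implicitly covers every earlier one, which is exactly what amdgpu_sync_keep_later() exploits. A reduced model of that rule:

    #include <stdbool.h>

    struct fence { unsigned context; unsigned seqno; };

    /* Within one context, a higher seqno signals no earlier than a
     * lower one, so the later fence subsumes the earlier. */
    static bool is_later(const struct fence *a, const struct fence *b)
    {
            return a->context == b->context && a->seqno > b->seqno;
    }

    /* Keep at most one fence per slot, always the later one. */
    static void keep_later(const struct fence **keep, const struct fence *f)
    {
            if (*keep && is_later(*keep, f))
                    return;                 /* existing fence covers f */
            *keep = f;
    }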
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 4865615..05a53f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -238,144 +238,10 @@
 		amdgpu_do_test_moves(adev);
 }
 
-static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
-					     struct amdgpu_ring *ring,
-					     struct fence **fence)
-{
-	uint32_t handle = ring->idx ^ 0xdeafbeef;
-	int r;
-
-	if (ring == &adev->uvd.ring) {
-		r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
-		if (r) {
-			DRM_ERROR("Failed to get dummy create msg\n");
-			return r;
-		}
-
-		r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
-		if (r) {
-			DRM_ERROR("Failed to get dummy destroy msg\n");
-			return r;
-		}
-
-	} else if (ring == &adev->vce.ring[0] ||
-		   ring == &adev->vce.ring[1]) {
-		r = amdgpu_vce_get_create_msg(ring, handle, NULL);
-		if (r) {
-			DRM_ERROR("Failed to get dummy create msg\n");
-			return r;
-		}
-
-		r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
-		if (r) {
-			DRM_ERROR("Failed to get dummy destroy msg\n");
-			return r;
-		}
-	} else {
-		struct amdgpu_fence *a_fence = NULL;
-		r = amdgpu_ring_lock(ring, 64);
-		if (r) {
-			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
-			return r;
-		}
-		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
-		amdgpu_ring_unlock_commit(ring);
-		*fence = &a_fence->base;
-	}
-	return 0;
-}
-
 void amdgpu_test_ring_sync(struct amdgpu_device *adev,
 			   struct amdgpu_ring *ringA,
 			   struct amdgpu_ring *ringB)
 {
-	struct fence *fence1 = NULL, *fence2 = NULL;
-	struct amdgpu_semaphore *semaphore = NULL;
-	int r;
-
-	r = amdgpu_semaphore_create(adev, &semaphore);
-	if (r) {
-		DRM_ERROR("Failed to create semaphore\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringA, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_wait(ringA, semaphore);
-	amdgpu_ring_unlock_commit(ringA);
-
-	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
-	if (r)
-		goto out_cleanup;
-
-	r = amdgpu_ring_lock(ringA, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_wait(ringA, semaphore);
-	amdgpu_ring_unlock_commit(ringA);
-
-	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
-	if (r)
-		goto out_cleanup;
-
-	mdelay(1000);
-
-	if (fence_is_signaled(fence1)) {
-		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringB, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %p\n", ringB);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_signal(ringB, semaphore);
-	amdgpu_ring_unlock_commit(ringB);
-
-	r = fence_wait(fence1, false);
-	if (r) {
-		DRM_ERROR("Failed to wait for sync fence 1\n");
-		goto out_cleanup;
-	}
-
-	mdelay(1000);
-
-	if (fence_is_signaled(fence2)) {
-		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringB, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %p\n", ringB);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_signal(ringB, semaphore);
-	amdgpu_ring_unlock_commit(ringB);
-
-	r = fence_wait(fence2, false);
-	if (r) {
-		DRM_ERROR("Failed to wait for sync fence 1\n");
-		goto out_cleanup;
-	}
-
-out_cleanup:
-	amdgpu_semaphore_free(adev, &semaphore, NULL);
-
-	if (fence1)
-		fence_put(fence1);
-
-	if (fence2)
-		fence_put(fence2);
-
-	if (r)
-		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
 }
 
 static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
@@ -383,109 +249,6 @@
 			    struct amdgpu_ring *ringB,
 			    struct amdgpu_ring *ringC)
 {
-	struct fence *fenceA = NULL, *fenceB = NULL;
-	struct amdgpu_semaphore *semaphore = NULL;
-	bool sigA, sigB;
-	int i, r;
-
-	r = amdgpu_semaphore_create(adev, &semaphore);
-	if (r) {
-		DRM_ERROR("Failed to create semaphore\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringA, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_wait(ringA, semaphore);
-	amdgpu_ring_unlock_commit(ringA);
-
-	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
-	if (r)
-		goto out_cleanup;
-
-	r = amdgpu_ring_lock(ringB, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_wait(ringB, semaphore);
-	amdgpu_ring_unlock_commit(ringB);
-	r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
-	if (r)
-		goto out_cleanup;
-
-	mdelay(1000);
-
-	if (fence_is_signaled(fenceA)) {
-		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
-		goto out_cleanup;
-	}
-	if (fence_is_signaled(fenceB)) {
-		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringC, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %p\n", ringC);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_signal(ringC, semaphore);
-	amdgpu_ring_unlock_commit(ringC);
-
-	for (i = 0; i < 30; ++i) {
-		mdelay(100);
-		sigA = fence_is_signaled(fenceA);
-		sigB = fence_is_signaled(fenceB);
-		if (sigA || sigB)
-			break;
-	}
-
-	if (!sigA && !sigB) {
-		DRM_ERROR("Neither fence A nor B has been signaled\n");
-		goto out_cleanup;
-	} else if (sigA && sigB) {
-		DRM_ERROR("Both fence A and B has been signaled\n");
-		goto out_cleanup;
-	}
-
-	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
-
-	r = amdgpu_ring_lock(ringC, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %p\n", ringC);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_signal(ringC, semaphore);
-	amdgpu_ring_unlock_commit(ringC);
-
-	mdelay(1000);
-
-	r = fence_wait(fenceA, false);
-	if (r) {
-		DRM_ERROR("Failed to wait for sync fence A\n");
-		goto out_cleanup;
-	}
-	r = fence_wait(fenceB, false);
-	if (r) {
-		DRM_ERROR("Failed to wait for sync fence B\n");
-		goto out_cleanup;
-	}
-
-out_cleanup:
-	amdgpu_semaphore_free(adev, &semaphore, NULL);
-
-	if (fenceA)
-		fence_put(fenceA);
-
-	if (fenceB)
-		fence_put(fenceB);
-
-	if (r)
-		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
 }
 
 static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 8f9834ab..26a5f4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -38,10 +38,10 @@
 
 	    TP_fast_assign(
 			   __entry->bo_list = p->bo_list;
-			   __entry->ring = p->ibs[i].ring->idx;
-			   __entry->dw = p->ibs[i].length_dw;
+			   __entry->ring = p->job->ring->idx;
+			   __entry->dw = p->job->ibs[i].length_dw;
 			   __entry->fences = amdgpu_fence_count_emitted(
-				p->ibs[i].ring);
+				p->job->ring);
 			   ),
 	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
 		      __entry->bo_list, __entry->ring, __entry->dw,
@@ -65,7 +65,7 @@
 			   __entry->sched_job = &job->base;
 			   __entry->ib = job->ibs;
 			   __entry->fence = &job->base.s_fence->base;
-			   __entry->ring_name = job->ibs[0].ring->name;
+			   __entry->ring_name = job->ring->name;
 			   __entry->num_ibs = job->num_ibs;
 			   ),
 	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
@@ -90,7 +90,7 @@
 			   __entry->sched_job = &job->base;
 			   __entry->ib = job->ibs;
 			   __entry->fence = &job->base.s_fence->base;
-			   __entry->ring_name = job->ibs[0].ring->name;
+			   __entry->ring_name = job->ring->name;
 			   __entry->num_ibs = job->num_ibs;
 			   ),
 	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
@@ -100,18 +100,24 @@
 
 
 TRACE_EVENT(amdgpu_vm_grab_id,
-	    TP_PROTO(unsigned vmid, int ring),
-	    TP_ARGS(vmid, ring),
+	    TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
+		     uint64_t pd_addr),
+	    TP_ARGS(vm, ring, vmid, pd_addr),
 	    TP_STRUCT__entry(
-			     __field(u32, vmid)
+			     __field(struct amdgpu_vm *, vm)
 			     __field(u32, ring)
+			     __field(u32, vmid)
+			     __field(u64, pd_addr)
 			     ),
 
 	    TP_fast_assign(
-			   __entry->vmid = vmid;
+			   __entry->vm = vm;
 			   __entry->ring = ring;
+			   __entry->vmid = vmid;
+			   __entry->pd_addr = pd_addr;
 			   ),
-	    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
+	    TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
+		      __entry->ring, __entry->vmid, __entry->pd_addr)
 );
 
 TRACE_EVENT(amdgpu_vm_bo_map,
@@ -228,8 +234,8 @@
 			   __entry->ring = ring;
 			   __entry->id = id;
 			   ),
-	    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
-		      __entry->pd_addr, __entry->ring, __entry->id)
+	    TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
+		      __entry->ring, __entry->id, __entry->pd_addr)
 );
 
 TRACE_EVENT(amdgpu_bo_list_set,
@@ -247,42 +253,6 @@
 	    TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
 );
 
-DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
-
-	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
-	    TP_ARGS(ring, sem),
-
-	    TP_STRUCT__entry(
-			     __field(int, ring)
-			     __field(signed, waiters)
-			     __field(uint64_t, gpu_addr)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->ring = ring;
-			   __entry->waiters = sem->waiters;
-			   __entry->gpu_addr = sem->gpu_addr;
-			   ),
-
-	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
-		      __entry->waiters, __entry->gpu_addr)
-);
-
-DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale,
-
-	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
-	    TP_ARGS(ring, sem)
-);
-
-DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait,
-
-	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
-	    TP_ARGS(ring, sem)
-);
-
 #endif
 
 /* This part must be outside protection */
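Call sites of the reworked amdgpu_vm_grab_id event now supply the VM pointer, ring index, id and page-directory address in that order; a hypothetical caller would look like:

    /* hypothetical call site matching the TP_PROTO above */
    trace_amdgpu_vm_grab_id(vm, ring->idx, vmid, pd_addr);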
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7b82e57..ab34190 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -77,6 +77,8 @@
 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 {
 	struct drm_global_reference *global_ref;
+	struct amdgpu_ring *ring;
+	struct amd_sched_rq *rq;
 	int r;
 
 	adev->mman.mem_global_referenced = false;
@@ -106,13 +108,27 @@
 		return r;
 	}
 
+	ring = adev->mman.buffer_funcs_ring;
+	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
+				  rq, amdgpu_sched_jobs);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
+		drm_global_item_unref(&adev->mman.mem_global_ref);
+		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+		return r;
+	}
+
 	adev->mman.mem_global_referenced = true;
+
 	return 0;
 }
 
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 {
 	if (adev->mman.mem_global_referenced) {
+		amd_sched_entity_fini(adev->mman.entity.sched,
+				      &adev->mman.entity);
 		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 		drm_global_item_unref(&adev->mman.mem_global_ref);
 		adev->mman.mem_global_referenced = false;
@@ -478,32 +494,32 @@
 /*
  * TTM backend functions.
  */
-struct amdgpu_ttm_tt {
-	struct ttm_dma_tt		ttm;
-	struct amdgpu_device		*adev;
-	u64				offset;
-	uint64_t			userptr;
-	struct mm_struct		*usermm;
-	uint32_t			userflags;
+struct amdgpu_ttm_gup_task_list {
+	struct list_head	list;
+	struct task_struct	*task;
 };
 
-/* prepare the sg table with the user pages */
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+struct amdgpu_ttm_tt {
+	struct ttm_dma_tt	ttm;
+	struct amdgpu_device	*adev;
+	u64			offset;
+	uint64_t		userptr;
+	struct mm_struct	*usermm;
+	uint32_t		userflags;
+	spinlock_t              guptasklock;
+	struct list_head        guptasks;
+	atomic_t		mmu_invalidations;
+};
+
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
-	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	unsigned pinned = 0, nents;
+	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	unsigned pinned = 0;
 	int r;
 
-	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
-	enum dma_data_direction direction = write ?
-		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
-
-	if (current->mm != gtt->usermm)
-		return -EPERM;
-
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
-		/* check that we only pin down anonymous memory
+		/* check that we only use anonymous memory
 		   to prevent problems with writeback */
 		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
 		struct vm_area_struct *vma;
@@ -516,9 +532,20 @@
 	do {
 		unsigned num_pages = ttm->num_pages - pinned;
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
-		struct page **pages = ttm->pages + pinned;
+		struct page **p = pages + pinned;
+		struct amdgpu_ttm_gup_task_list guptask;
 
-		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+		guptask.task = current;
+		spin_lock(&gtt->guptasklock);
+		list_add(&guptask.list, &gtt->guptasks);
+		spin_unlock(&gtt->guptasklock);
+
+		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+
+		spin_lock(&gtt->guptasklock);
+		list_del(&guptask.list);
+		spin_unlock(&gtt->guptasklock);
+
 		if (r < 0)
 			goto release_pages;
 
@@ -526,6 +553,25 @@
 
 	} while (pinned < ttm->num_pages);
 
+	return 0;
+
+release_pages:
+	release_pages(pages, pinned, 0);
+	return r;
+}
+
+/* prepare the sg table with the user pages */
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned nents;
+	int r;
+
+	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
 				      ttm->num_pages << PAGE_SHIFT,
 				      GFP_KERNEL);
@@ -544,9 +590,6 @@
 
 release_sg:
 	kfree(ttm->sg);
-
-release_pages:
-	release_pages(ttm->pages, pinned, 0);
 	return r;
 }
 
@@ -769,38 +812,61 @@
 	gtt->userptr = addr;
 	gtt->usermm = current->mm;
 	gtt->userflags = flags;
+	spin_lock_init(&gtt->guptasklock);
+	INIT_LIST_HEAD(&gtt->guptasks);
+	atomic_set(&gtt->mmu_invalidations, 0);
+
 	return 0;
 }
 
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
 	if (gtt == NULL)
-		return false;
+		return NULL;
 
-	return !!gtt->userptr;
+	return gtt->usermm;
 }
 
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 				  unsigned long end)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	struct amdgpu_ttm_gup_task_list *entry;
 	unsigned long size;
 
-	if (gtt == NULL)
-		return false;
-
-	if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+	if (gtt == NULL || !gtt->userptr)
 		return false;
 
 	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
 	if (gtt->userptr > end || gtt->userptr + size <= start)
 		return false;
 
+	spin_lock(&gtt->guptasklock);
+	list_for_each_entry(entry, &gtt->guptasks, list) {
+		if (entry->task == current) {
+			spin_unlock(&gtt->guptasklock);
+			return false;
+		}
+	}
+	spin_unlock(&gtt->guptasklock);
+
+	atomic_inc(&gtt->mmu_invalidations);
+
 	return true;
 }
 
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+				       int *last_invalidated)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	int prev_invalidated = *last_invalidated;
+
+	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
+	return prev_invalidated != *last_invalidated;
+}
+
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1014,9 +1080,10 @@
 		       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_job *job;
+
 	uint32_t max_bytes;
 	unsigned num_loops, num_dw;
-	struct amdgpu_ib *ib;
 	unsigned i;
 	int r;
 
@@ -1028,20 +1095,12 @@
 	while (num_dw & 0x7)
 		num_dw++;
 
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
-		return -ENOMEM;
-
-	r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
-	if (r) {
-		kfree(ib);
+	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+	if (r)
 		return r;
-	}
-
-	ib->length_dw = 0;
 
 	if (resv) {
-		r = amdgpu_sync_resv(adev, &ib->sync, resv,
+		r = amdgpu_sync_resv(adev, &job->sync, resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED);
 		if (r) {
 			DRM_ERROR("sync failed (%d).\n", r);
@@ -1052,31 +1111,25 @@
 	for (i = 0; i < num_loops; i++) {
 		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
-		amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
-					cur_size_in_bytes);
+		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
+					dst_offset, cur_size_in_bytes);
 
 		src_offset += cur_size_in_bytes;
 		dst_offset += cur_size_in_bytes;
 		byte_count -= cur_size_in_bytes;
 	}
 
-	amdgpu_vm_pad_ib(adev, ib);
-	WARN_ON(ib->length_dw > num_dw);
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-						 &amdgpu_vm_free_job,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 fence);
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+	WARN_ON(job->ibs[0].length_dw > num_dw);
+	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
 	if (r)
 		goto error_free;
 
-	if (!amdgpu_enable_scheduler) {
-		amdgpu_ib_free(adev, ib);
-		kfree(ib);
-	}
 	return 0;
+
 error_free:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 	return r;
 }
 
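The userptr rework above replaces the hard current->mm check with bookkeeping: tasks currently inside get_user_pages() are tracked on guptasks, and any overlapping MMU invalidation bumps mmu_invalidations so a caller can tell whether its pages went stale. A user-space sketch of that counter-snapshot pattern (the idea, not the driver API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int invalidations;    /* bumped by the invalidate path */

    /* like amdgpu_ttm_tt_userptr_invalidated(): report whether the counter
     * moved since the caller's last snapshot, and refresh the snapshot */
    static bool userptr_invalidated(int *last)
    {
        int prev = *last;

        *last = atomic_load(&invalidations);
        return prev != *last;
    }

    int main(void)
    {
        int snapshot = atomic_load(&invalidations);

        atomic_fetch_add(&invalidations, 1);   /* simulate an invalidation */
        printf("%d\n", userptr_invalidated(&snapshot));   /* prints 1 */
        printf("%d\n", userptr_invalidated(&snapshot));   /* prints 0 */
        return 0;
    }
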
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 53f987a..c1a5810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -91,6 +91,8 @@
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
+	struct amdgpu_ring *ring;
+	struct amd_sched_rq *rq;
 	unsigned long bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
@@ -191,6 +193,15 @@
 
 	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
 
+	ring = &adev->uvd.ring;
+	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
+				  rq, amdgpu_sched_jobs);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up UVD run queue.\n");
+		return r;
+	}
+
 	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
 		atomic_set(&adev->uvd.handles[i], 0);
 		adev->uvd.filp[i] = NULL;
@@ -210,6 +221,8 @@
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
+	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+
 	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
 	if (!r) {
 		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
@@ -241,7 +254,7 @@
 
 			amdgpu_uvd_note_usage(adev);
 
-			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
 			if (r) {
 				DRM_ERROR("Error destroying UVD (%d)!\n", r);
 				continue;
@@ -295,7 +308,8 @@
 
 			amdgpu_uvd_note_usage(adev);
 
-			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+			r = amdgpu_uvd_get_destroy_msg(ring, handle,
+						       false, &fence);
 			if (r) {
 				DRM_ERROR("Error destroying UVD (%d)!\n", r);
 				continue;
@@ -525,13 +539,6 @@
 		return -EINVAL;
 	}
 
-	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
-						MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
-		return r;
-	}
-
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
 		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
@@ -616,7 +623,6 @@
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_bo *bo;
-	struct amdgpu_ib *ib;
 	uint32_t cmd, lo, hi;
 	uint64_t start, end;
 	uint64_t addr;
@@ -638,9 +644,10 @@
 	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
 	start += addr;
 
-	ib = &ctx->parser->ibs[ctx->ib_idx];
-	ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
-	ib->ptr[ctx->data1] = start >> 32;
+	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
+			    lower_32_bits(start));
+	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
+			    upper_32_bits(start));
 
 	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
 	if (cmd < 0x4) {
@@ -702,7 +709,7 @@
 static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
 			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
 {
-	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
+	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
 	int i, r;
 
 	ctx->idx++;
@@ -748,7 +755,7 @@
 static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
 				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
 {
-	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
+	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
 	int r;
 
 	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
@@ -790,7 +797,7 @@
 		[0x00000003]	=	2048,
 		[0x00000004]	=	0xFFFFFFFF,
 	};
-	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
+	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
 	int r;
 
 	if (ib->length_dw % 16) {
@@ -823,22 +830,14 @@
 	return 0;
 }
 
-static int amdgpu_uvd_free_job(
-	struct amdgpu_job *job)
-{
-	amdgpu_ib_free(job->adev, job->ibs);
-	kfree(job->ibs);
-	return 0;
-}
-
-static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
-			       struct amdgpu_bo *bo,
-			       struct fence **fence)
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+			       bool direct, struct fence **fence)
 {
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
-	struct amdgpu_ib *ib = NULL;
+	struct amdgpu_job *job;
+	struct amdgpu_ib *ib;
 	struct fence *f = NULL;
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
@@ -862,15 +861,12 @@
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
 		goto err;
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib) {
-		r = -ENOMEM;
-		goto err;
-	}
-	r = amdgpu_ib_get(ring, NULL, 64, ib);
-	if (r)
-		goto err1;
 
+	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+	if (r)
+		goto err;
+
+	ib = &job->ibs[0];
 	addr = amdgpu_bo_gpu_offset(bo);
 	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
 	ib->ptr[1] = addr;
@@ -882,12 +878,19 @@
 		ib->ptr[i] = PACKET2(0);
 	ib->length_dw = 16;
 
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-						 &amdgpu_uvd_free_job,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
-	if (r)
-		goto err2;
+	if (direct) {
+		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+		job->fence = f;
+		if (r)
+			goto err_free;
+
+		amdgpu_job_free(job);
+	} else {
+		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
+				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+		if (r)
+			goto err_free;
+	}
 
 	ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
@@ -895,16 +898,12 @@
 		*fence = fence_get(f);
 	amdgpu_bo_unref(&bo);
 	fence_put(f);
-	if (amdgpu_enable_scheduler)
-		return 0;
 
-	amdgpu_ib_free(ring->adev, ib);
-	kfree(ib);
 	return 0;
-err2:
-	amdgpu_ib_free(ring->adev, ib);
-err1:
-	kfree(ib);
+
+err_free:
+	amdgpu_job_free(job);
+
 err:
 	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
@@ -959,11 +958,11 @@
 	amdgpu_bo_kunmap(bo);
 	amdgpu_bo_unreserve(bo);
 
-	return amdgpu_uvd_send_msg(ring, bo, fence);
+	return amdgpu_uvd_send_msg(ring, bo, true, fence);
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct fence **fence)
+			       bool direct, struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -1001,7 +1000,7 @@
 	amdgpu_bo_kunmap(bo);
 	amdgpu_bo_unreserve(bo);
 
-	return amdgpu_uvd_send_msg(ring, bo, fence);
+	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
 }
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
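
The new `direct` flag above selects between immediate execution on the ring via amdgpu_ib_schedule() (used by the ring tests) and queueing through the UVD scheduler entity via amdgpu_job_submit() (used for handle cleanup). A toy sketch of that dispatch split, with placeholder names:

    #include <stdbool.h>
    #include <stdio.h>

    typedef void (*job_fn)(void);

    static void run_inline(job_fn fn) { fn(); }  /* ~ amdgpu_ib_schedule() */
    static void run_queued(job_fn fn) { fn(); }  /* ~ amdgpu_job_submit();
                                                    really deferred to the
                                                    scheduler entity */

    static void send_msg(job_fn fn, bool direct)
    {
        if (direct)
            run_inline(fn);   /* ring tests: execute right away */
        else
            run_queued(fn);   /* normal path: go through the entity */
    }

    static void work(void) { puts("UVD message submitted"); }

    int main(void)
    {
        send_msg(work, true);
        send_msg(work, false);
        return 0;
    }
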
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 1724c2c..9a3b449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -31,7 +31,7 @@
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 			      struct fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct fence **fence);
+			       bool direct, struct fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
 			     struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a745eee..4bec0c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -74,6 +74,8 @@
  */
 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 {
+	struct amdgpu_ring *ring;
+	struct amd_sched_rq *rq;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned ucode_version, version_major, version_minor, binary_id;
@@ -170,6 +172,16 @@
 		return r;
 	}
 
+
+	ring = &adev->vce.ring[0];
+	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
+				  rq, amdgpu_sched_jobs);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up VCE run queue.\n");
+		return r;
+	}
+
 	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 		atomic_set(&adev->vce.handles[i], 0);
 		adev->vce.filp[i] = NULL;
@@ -190,6 +202,8 @@
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
+	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+
 	amdgpu_bo_unref(&adev->vce.vcpu_bo);
 
 	amdgpu_ring_fini(&adev->vce.ring[0]);
@@ -337,7 +351,7 @@
 
 		amdgpu_vce_note_usage(adev);
 
-		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
+		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
 		if (r)
 			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
 
@@ -346,14 +360,6 @@
 	}
 }
 
-static int amdgpu_vce_free_job(
-	struct amdgpu_job *job)
-{
-	amdgpu_ib_free(job->adev, job->ibs);
-	kfree(job->ibs);
-	return 0;
-}
-
 /**
  * amdgpu_vce_get_create_msg - generate a VCE create msg
  *
@@ -368,21 +374,17 @@
 			      struct fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
-	struct amdgpu_ib *ib = NULL;
+	struct amdgpu_job *job;
+	struct amdgpu_ib *ib;
 	struct fence *f = NULL;
-	struct amdgpu_device *adev = ring->adev;
 	uint64_t dummy;
 	int i, r;
 
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
-		return -ENOMEM;
-	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		kfree(ib);
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	if (r)
 		return r;
-	}
+
+	ib = &job->ibs[0];
 
 	dummy = ib->gpu_addr + 1024;
 
@@ -423,20 +425,19 @@
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-						 &amdgpu_vce_free_job,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
+	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+	job->fence = f;
 	if (r)
 		goto err;
+
+	amdgpu_job_free(job);
 	if (fence)
 		*fence = fence_get(f);
 	fence_put(f);
-	if (amdgpu_enable_scheduler)
-		return 0;
+	return 0;
+
 err:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 	return r;
 }
 
@@ -451,26 +452,20 @@
  * Close up a stream for HW test or if userspace failed to do so
  */
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct fence **fence)
+			       bool direct, struct fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
-	struct amdgpu_ib *ib = NULL;
+	struct amdgpu_job *job;
+	struct amdgpu_ib *ib;
 	struct fence *f = NULL;
-	struct amdgpu_device *adev = ring->adev;
 	uint64_t dummy;
 	int i, r;
 
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
-		return -ENOMEM;
-
-	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
-	if (r) {
-		kfree(ib);
-		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	if (r)
 		return r;
-	}
 
+	ib = &job->ibs[0];
 	dummy = ib->gpu_addr + 1024;
 
 	/* stitch together a VCE destroy msg */
@@ -490,20 +485,28 @@
 
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-						 &amdgpu_vce_free_job,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
-	if (r)
-		goto err;
+
+	if (direct) {
+		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+		job->fence = f;
+		if (r)
+			goto err;
+
+		amdgpu_job_free(job);
+	} else {
+		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+		if (r)
+			goto err;
+	}
+
 	if (fence)
 		*fence = fence_get(f);
 	fence_put(f);
-	if (amdgpu_enable_scheduler)
-		return 0;
+	return 0;
+
 err:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 	return r;
 }
 
@@ -521,7 +524,6 @@
 			       int lo, int hi, unsigned size, uint32_t index)
 {
 	struct amdgpu_bo_va_mapping *mapping;
-	struct amdgpu_ib *ib = &p->ibs[ib_idx];
 	struct amdgpu_bo *bo;
 	uint64_t addr;
 
@@ -550,8 +552,8 @@
 	addr += amdgpu_bo_gpu_offset(bo);
 	addr -= ((uint64_t)size) * ((uint64_t)index);
 
-	ib->ptr[lo] = addr & 0xFFFFFFFF;
-	ib->ptr[hi] = addr >> 32;
+	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
+	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
 
 	return 0;
 }
@@ -606,7 +608,7 @@
  */
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 {
-	struct amdgpu_ib *ib = &p->ibs[ib_idx];
+	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
 	unsigned fb_idx = 0, bs_idx = 0;
 	int session_idx = -1;
 	bool destroyed = false;
@@ -743,30 +745,6 @@
 }
 
 /**
- * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
- *
- * @ring: engine to use
- * @semaphore: address of semaphore
- * @emit_wait: true=emit wait, false=emit signal
- *
- */
-bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
-				    struct amdgpu_semaphore *semaphore,
-				    bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
-	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
-	if (!emit_wait)
-		amdgpu_ring_write(ring, VCE_CMD_END);
-
-	return true;
-}
-
-/**
  * amdgpu_vce_ring_emit_ib - execute indirect buffer
  *
  * @ring: engine to use
@@ -814,14 +792,14 @@
 	unsigned i;
 	int r;
 
-	r = amdgpu_ring_lock(ring, 16);
+	r = amdgpu_ring_alloc(ring, 16);
 	if (r) {
 		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
 			  ring->idx, r);
 		return r;
 	}
 	amdgpu_ring_write(ring, VCE_CMD_END);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (amdgpu_ring_get_rptr(ring) != rptr)
@@ -862,7 +840,7 @@
 		goto error;
 	}
 
-	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
+	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
 		goto error;
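
VCE now follows the same job lifetime convention as UVD: allocate a job with an embedded IB, submit it (after which the scheduler owns the job and frees it once it has run), and only call amdgpu_job_free() on the error path. A user-space sketch of that ownership rule, with stand-in allocators:

    #include <stdlib.h>

    struct job { int ibs[16]; };

    static int job_alloc(struct job **job)  /* ~ amdgpu_job_alloc_with_ib() */
    {
        *job = calloc(1, sizeof(**job));
        return *job ? 0 : -1;
    }

    static int job_submit(struct job *job)  /* ~ amdgpu_job_submit() */
    {
        /* on success, ownership passes to the scheduler, which frees the
         * job after execution; the caller must not touch it again */
        free(job);
        return 0;
    }

    int main(void)
    {
        struct job *job;
        int r = job_alloc(&job);

        if (r)
            return 1;

        r = job_submit(job);
        if (r) {
            free(job);      /* error path: amdgpu_job_free(job) */
            return 1;
        }
        return 0;
    }
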
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index ba2da8e..ef99d23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -31,12 +31,9 @@
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 			      struct fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct fence **fence);
+			       bool direct, struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
-				    struct amdgpu_semaphore *semaphore,
-				    bool emit_wait);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 				unsigned flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9599f75..b6c011b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -50,12 +50,15 @@
  * SI supports 16.
  */
 
+/* Special value indicating that no flush is necessary */
+#define AMDGPU_VM_NO_FLUSH (~0ll)
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *
  * @adev: amdgpu_device pointer
  *
- * Calculate the number of page directory entries (cayman+).
+ * Calculate the number of page directory entries.
  */
 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
 {
@@ -67,7 +70,7 @@
  *
  * @adev: amdgpu_device pointer
  *
- * Calculate the size of the page directory in bytes (cayman+).
+ * Calculate the size of the page directory in bytes.
  */
 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
 {
@@ -89,11 +92,10 @@
 			 struct amdgpu_bo_list_entry *entry)
 {
 	entry->robj = vm->page_directory;
-	entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
-	entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
 	entry->priority = 0;
 	entry->tv.bo = &vm->page_directory->tbo;
 	entry->tv.shared = true;
+	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
 }
 
@@ -154,133 +156,154 @@
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
- *
- * Global mutex must be locked!
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync)
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr)
 {
-	struct fence *best[AMDGPU_MAX_RINGS] = {};
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
+	struct fence *updates = sync->last_vm_update;
+	int r;
 
-	unsigned choices[2] = {};
-	unsigned i;
+	mutex_lock(&adev->vm_manager.lock);
 
 	/* check if the id is still valid */
-	if (vm_id->id) {
-		unsigned id = vm_id->id;
+	if (id->mgr_id) {
+		struct fence *flushed = id->flushed_updates;
+		bool is_later;
 		long owner;
 
-		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
-		if (owner == (long)vm) {
-			trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+		if (!flushed)
+			is_later = true;
+		else if (!updates)
+			is_later = false;
+		else
+			is_later = fence_is_later(updates, flushed);
+
+		owner = atomic_long_read(&id->mgr_id->owner);
+		if (!is_later && owner == (long)id &&
+		    pd_addr == id->pd_gpu_addr) {
+
+			r = amdgpu_sync_fence(ring->adev, sync,
+					      id->mgr_id->active);
+			if (r) {
+				mutex_unlock(&adev->vm_manager.lock);
+				return r;
+			}
+
+			fence_put(id->mgr_id->active);
+			id->mgr_id->active = fence_get(fence);
+
+			list_move_tail(&id->mgr_id->list,
+				       &adev->vm_manager.ids_lru);
+
+			*vm_id = id->mgr_id - adev->vm_manager.ids;
+			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
+						*vm_pd_addr);
+
+			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 	}
 
-	/* we definately need to flush */
-	vm_id->pd_gpu_addr = ~0ll;
+	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+				      struct amdgpu_vm_manager_id,
+				      list);
 
-	/* skip over VMID 0, since it is the system VM */
-	for (i = 1; i < adev->vm_manager.nvm; ++i) {
-		struct fence *fence = adev->vm_manager.ids[i].active;
-		struct amdgpu_ring *fring;
+	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
+	if (!r) {
+		fence_put(id->mgr_id->active);
+		id->mgr_id->active = fence_get(fence);
 
-		if (fence == NULL) {
-			/* found a free one */
-			vm_id->id = i;
-			trace_amdgpu_vm_grab_id(i, ring->idx);
-			return 0;
-		}
+		fence_put(id->flushed_updates);
+		id->flushed_updates = fence_get(updates);
 
-		fring = amdgpu_ring_from_fence(fence);
-		if (best[fring->idx] == NULL ||
-		    fence_is_later(best[fring->idx], fence)) {
-			best[fring->idx] = fence;
-			choices[fring == ring ? 0 : 1] = i;
-		}
+		id->pd_gpu_addr = pd_addr;
+
+		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
+		atomic_long_set(&id->mgr_id->owner, (long)id);
+
+		*vm_id = id->mgr_id - adev->vm_manager.ids;
+		*vm_pd_addr = pd_addr;
+		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 	}
 
-	for (i = 0; i < 2; ++i) {
-		if (choices[i]) {
-			struct fence *fence;
-
-			fence  = adev->vm_manager.ids[choices[i]].active;
-			vm_id->id = choices[i];
-
-			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-			return amdgpu_sync_fence(ring->adev, sync, fence);
-		}
-	}
-
-	/* should never happen */
-	BUG();
-	return -EINVAL;
+	mutex_unlock(&adev->vm_manager.lock);
+	return r;
 }
 
 /**
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vm: vm we want to flush
- * @updates: last vm update that we waited for
+ * @vm_id: vmid number to use
+ * @pd_addr: address of the page directory
  *
- * Flush the vm (cayman+).
- *
- * Global and local mutex must be locked!
+ * Emit a VM flush when it is necessary.
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates)
+		     unsigned vm_id, uint64_t pd_addr,
+		     uint32_t gds_base, uint32_t gds_size,
+		     uint32_t gws_base, uint32_t gws_size,
+		     uint32_t oa_base, uint32_t oa_size)
 {
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
-	struct fence *flushed_updates = vm_id->flushed_updates;
-	bool is_later;
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
+		mgr_id->gds_base != gds_base ||
+		mgr_id->gds_size != gds_size ||
+		mgr_id->gws_base != gws_base ||
+		mgr_id->gws_size != gws_size ||
+		mgr_id->oa_base != oa_base ||
+		mgr_id->oa_size != oa_size);
 
-	if (!flushed_updates)
-		is_later = true;
-	else if (!updates)
-		is_later = false;
-	else
-		is_later = fence_is_later(updates, flushed_updates);
+	if (ring->funcs->emit_pipeline_sync && (
+	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
+		amdgpu_ring_emit_pipeline_sync(ring);
 
-	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
-		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		if (is_later) {
-			vm_id->flushed_updates = fence_get(updates);
-			fence_put(flushed_updates);
-		}
-		vm_id->pd_gpu_addr = pd_addr;
-		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
+	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
+		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+	}
+
+	if (gds_switch_needed) {
+		mgr_id->gds_base = gds_base;
+		mgr_id->gds_size = gds_size;
+		mgr_id->gws_base = gws_base;
+		mgr_id->gws_size = gws_size;
+		mgr_id->oa_base = oa_base;
+		mgr_id->oa_size = oa_size;
+		amdgpu_ring_emit_gds_switch(ring, vm_id,
+					    gds_base, gds_size,
+					    gws_base, gws_size,
+					    oa_base, oa_size);
 	}
 }
 
 /**
- * amdgpu_vm_fence - remember fence for vm
+ * amdgpu_vm_reset_id - reset VMID to zero
  *
- * @adev: amdgpu_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
+ * @adev: amdgpu device structure
+ * @vm_id: vmid number to use
  *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
+ * Reset saved GDS, GWS and OA to force a switch on the next flush.
  */
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence)
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
 {
-	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
-	unsigned vm_id = vm->ids[ring->idx].id;
+	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
 
-	fence_put(adev->vm_manager.ids[vm_id].active);
-	adev->vm_manager.ids[vm_id].active = fence_get(fence);
-	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
+	mgr_id->gds_base = 0;
+	mgr_id->gds_size = 0;
+	mgr_id->gws_base = 0;
+	mgr_id->gws_size = 0;
+	mgr_id->oa_base = 0;
+	mgr_id->oa_size = 0;
 }
 
 /**
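
The VMID allocator above keeps the hardware IDs on an LRU list: when the page directory address and VM updates are unchanged and the owner still matches, the previous ID is reused without a flush; otherwise the least recently used ID is recycled and moved to the list tail. A tiny user-space sketch of the LRU recycling (array instead of list_head, fast path omitted):

    #include <stdio.h>

    #define NUM_IDS 4   /* pretend the ASIC exposes 4 VMIDs, 1..3 usable */

    static unsigned lru[NUM_IDS - 1] = { 1, 2, 3 };  /* front = LRU */

    /* take the LRU id and move it to the tail,
     * like list_move_tail() on ids_lru */
    static unsigned grab_id(void)
    {
        unsigned id = lru[0];

        for (unsigned i = 0; i + 1 < NUM_IDS - 1; ++i)
            lru[i] = lru[i + 1];
        lru[NUM_IDS - 2] = id;
        return id;
    }

    int main(void)
    {
        for (int i = 0; i < 5; ++i)
            printf("grabbed VMID %u\n", grab_id());  /* 1 2 3 1 2 */
        return 0;
    }
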
@@ -289,7 +312,7 @@
  * @vm: requested vm
  * @bo: requested buffer object
  *
- * Find @bo inside the requested vm (cayman+).
+ * Find @bo inside the requested vm.
  * Search inside the @bos vm list for the requested vm
  * Returns the found bo_va or NULL if none is found
  *
@@ -312,32 +335,40 @@
  * amdgpu_vm_update_pages - helper to call the right asic function
  *
  * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw access flags
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
  * @flags: hw access flags
- * @gtt_flags: GTT hw access flags
  *
  * Traces the parameters and calls the right asic functions
  * to setup the page table using the DMA.
  */
 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
+				   struct amdgpu_gart *gtt,
+				   uint32_t gtt_flags,
 				   struct amdgpu_ib *ib,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
-				   uint32_t flags, uint32_t gtt_flags)
+				   uint32_t flags)
 {
 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
-		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
+	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
+		uint64_t src = gtt->table_addr + (addr >> 12) * 8;
 		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
 
-	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
-		amdgpu_vm_write_pte(adev, ib, pe, addr,
-				      count, incr, flags);
+	} else if (gtt) {
+		dma_addr_t *pages_addr = gtt->pages_addr;
+		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
+				    count, incr, flags);
+
+	} else if (count < 3) {
+		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+				    count, incr, flags);
 
 	} else {
 		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
@@ -345,15 +376,6 @@
 	}
 }
 
-int amdgpu_vm_free_job(struct amdgpu_job *job)
-{
-	int i;
-	for (i = 0; i < job->num_ibs; i++)
-		amdgpu_ib_free(job->adev, &job->ibs[i]);
-	kfree(job->ibs);
-	return 0;
-}
-
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -363,15 +385,18 @@
  * need to reserve the bo before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+			      struct amdgpu_vm *vm,
 			      struct amdgpu_bo *bo)
 {
-	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+	struct amdgpu_ring *ring;
 	struct fence *fence = NULL;
-	struct amdgpu_ib *ib;
+	struct amdgpu_job *job;
 	unsigned entries;
 	uint64_t addr;
 	int r;
 
+	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
 	r = reservation_object_reserve_shared(bo->tbo.resv);
 	if (r)
 		return r;
@@ -383,56 +408,57 @@
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
+	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+	if (r)
 		goto error;
 
-	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
+	amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
+			       0, 0);
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+
+	WARN_ON(job->ibs[0].length_dw > 64);
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &fence);
 	if (r)
 		goto error_free;
 
-	ib->length_dw = 0;
-
-	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
-	amdgpu_vm_pad_ib(adev, ib);
-	WARN_ON(ib->length_dw > 64);
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-						 &amdgpu_vm_free_job,
-						 AMDGPU_FENCE_OWNER_VM,
-						 &fence);
-	if (!r)
-		amdgpu_bo_fence(bo, fence, true);
+	amdgpu_bo_fence(bo, fence, true);
 	fence_put(fence);
-	if (amdgpu_enable_scheduler)
-		return 0;
+	return 0;
 
 error_free:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 
 error:
 	return r;
 }
 
 /**
- * amdgpu_vm_map_gart - get the physical address of a gart page
+ * amdgpu_vm_map_gart - Resolve gart mapping of addr
  *
- * @adev: amdgpu_device pointer
+ * @pages_addr: optional array of DMA addresses to use for lookup
  * @addr: the unmapped addr
  *
  * Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
+ * to and return the pointer for the page table entry.
  */
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
 	uint64_t result;
 
-	/* page table offset */
-	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
+	if (pages_addr) {
+		/* page table offset */
+		result = pages_addr[addr >> PAGE_SHIFT];
 
-	/* in case cpu page size != gpu page size*/
-	result |= addr & (~PAGE_MASK);
+		/* in case cpu page size != gpu page size */
+		result |= addr & (~PAGE_MASK);
+
+	} else {
+		/* No mapping required */
+		result = addr;
+	}
+
+	result &= 0xFFFFFFFFFFFFF000ULL;
 
 	return result;
 }
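
amdgpu_vm_map_gart() above now takes the DMA-address array explicitly, so it works for any GART-like mapping instead of only adev->gart. A runnable analogue of the lookup, the in-page offset OR-in, and the final mask down to the page-aligned PTE address bits:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1ULL << PAGE_SHIFT) - 1))

    /* user-space analogue of amdgpu_vm_map_gart() */
    static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
    {
        uint64_t result;

        if (pages_addr)
            /* DMA address of the page, plus the offset inside it */
            result = pages_addr[addr >> PAGE_SHIFT] | (addr & ~PAGE_MASK);
        else
            result = addr;          /* no translation required */

        /* the PTE address field is page aligned; low bits hold flags */
        return result & 0xFFFFFFFFFFFFF000ULL;
    }

    int main(void)
    {
        uint64_t dma[] = { 0xAB000ULL };  /* page 0 lives at DMA 0xAB000 */

        printf("%#llx\n", (unsigned long long)map_gart(dma, 0x123));
        printf("%#llx\n", (unsigned long long)map_gart(NULL, 0xCD456));
        return 0;
    }
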
@@ -446,45 +472,37 @@
  * @end: end of GPU address range
  *
  * Allocates new page tables if necessary
- * and updates the page directory (cayman+).
+ * and updates the page directory.
  * Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
  */
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm)
 {
-	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+	struct amdgpu_ring *ring;
 	struct amdgpu_bo *pd = vm->page_directory;
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
+	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct fence *fence = NULL;
 
 	int r;
 
+	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
 	/* padding, etc. */
 	ndw = 64;
 
 	/* assume the worst case */
 	ndw += vm->max_pde_used * 6;
 
-	/* update too big for an IB */
-	if (ndw > 0xfffff)
-		return -ENOMEM;
-
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
-		return -ENOMEM;
-
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-	if (r) {
-		kfree(ib);
+	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+	if (r)
 		return r;
-	}
-	ib->length_dw = 0;
+
+	ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -504,9 +522,10 @@
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, ib, last_pde,
-						       last_pt, count, incr,
-						       AMDGPU_PTE_VALID, 0);
+				amdgpu_vm_update_pages(adev, NULL, 0, ib,
+						       last_pde, last_pt,
+						       count, incr,
+						       AMDGPU_PTE_VALID);
 			}
 
 			count = 1;
@@ -518,17 +537,16 @@
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
-				       incr, AMDGPU_PTE_VALID, 0);
+		amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
+				       count, incr, AMDGPU_PTE_VALID);
 
 	if (ib->length_dw != 0) {
-		amdgpu_vm_pad_ib(adev, ib);
-		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+		amdgpu_ring_pad_ib(ring, ib);
+		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+				 AMDGPU_FENCE_OWNER_VM);
 		WARN_ON(ib->length_dw > ndw);
-		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-							 &amdgpu_vm_free_job,
-							 AMDGPU_FENCE_OWNER_VM,
-							 &fence);
+		r = amdgpu_job_submit(job, ring, &vm->entity,
+				      AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
 			goto error_free;
 
@@ -536,18 +554,15 @@
 		fence_put(vm->page_directory_fence);
 		vm->page_directory_fence = fence_get(fence);
 		fence_put(fence);
-	}
 
-	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
-		amdgpu_ib_free(adev, ib);
-		kfree(ib);
+	} else {
+		amdgpu_job_free(job);
 	}
 
 	return 0;
 
 error_free:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 	return r;
 }
 
@@ -555,20 +570,20 @@
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
  * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw mapping flags
  * @ib: IB for the update
  * @pe_start: first PTE to handle
  * @pe_end: last PTE to handle
  * @addr: addr those PTEs should point to
  * @flags: hw mapping flags
- * @gtt_flags: GTT hw mapping flags
- *
- * Global and local mutex must be locked!
  */
 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
+				struct amdgpu_gart *gtt,
+				uint32_t gtt_flags,
 				struct amdgpu_ib *ib,
 				uint64_t pe_start, uint64_t pe_end,
-				uint64_t addr, uint32_t flags,
-				uint32_t gtt_flags)
+				uint64_t addr, uint32_t flags)
 {
 	/**
 	 * The MC L1 TLB supports variable sized pages, based on a fragment
@@ -598,36 +613,39 @@
 
 	unsigned count;
 
+	/* Abort early if there isn't anything to do */
+	if (pe_start == pe_end)
+		return;
+
 	/* system pages are not contiguous */
-	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
-	    (frag_start >= frag_end)) {
+	if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
-				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+		amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
+				       addr, count, AMDGPU_GPU_PAGE_SIZE,
+				       flags);
 		return;
 	}
 
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
-				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+		amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
+				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
-			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
-			       gtt_flags);
+	amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
+			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
-				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+		amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
+				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 	}
 }
 
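amdgpu_vm_frag_ptes() splits a PTE run into an unaligned 4K head, a fragment-aligned middle that can use the larger MC L1 TLB fragment size, and a 4K tail. A standalone sketch of the split arithmetic (the 0x80-byte fragment granularity used here is an assumption for illustration; the kernel derives it from the fragment field):

    #include <stdint.h>
    #include <stdio.h>

    /* split [pe_start, pe_end) into head, fragment-aligned middle, tail */
    static void frag_split(uint64_t pe_start, uint64_t pe_end, uint64_t frag)
    {
        uint64_t frag_start = (pe_start + frag - 1) & ~(frag - 1);
        uint64_t frag_end = pe_end & ~(frag - 1);

        if (frag_start >= frag_end) {    /* too small to fragment */
            printf("plain : %#llx-%#llx\n",
                   (unsigned long long)pe_start, (unsigned long long)pe_end);
            return;
        }
        if (pe_start != frag_start)
            printf("head  : %#llx-%#llx\n",
                   (unsigned long long)pe_start, (unsigned long long)frag_start);
        printf("middle: %#llx-%#llx\n",
               (unsigned long long)frag_start, (unsigned long long)frag_end);
        if (frag_end != pe_end)
            printf("tail  : %#llx-%#llx\n",
                   (unsigned long long)frag_end, (unsigned long long)pe_end);
    }

    int main(void)
    {
        frag_split(0x1008, 0x2FF8, 0x80);
        return 0;
    }
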
@@ -635,122 +653,105 @@
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
  * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw mapping flags
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
  * @dst: destination address to map to
  * @flags: mapping flags
  *
- * Update the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
+ * Update the page tables in the range @start - @end.
  */
-static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				 struct amdgpu_vm *vm,
-				 struct amdgpu_ib *ib,
-				 uint64_t start, uint64_t end,
-				 uint64_t dst, uint32_t flags,
-				 uint32_t gtt_flags)
+static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
+				  struct amdgpu_gart *gtt,
+				  uint32_t gtt_flags,
+				  struct amdgpu_vm *vm,
+				  struct amdgpu_ib *ib,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
 {
-	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
-	uint64_t last_pte = ~0, last_dst = ~0;
-	void *owner = AMDGPU_FENCE_OWNER_VM;
-	unsigned count = 0;
-	uint64_t addr;
+	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
-	/* sync to everything on unmapping */
-	if (!(flags & AMDGPU_PTE_VALID))
-		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+	uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
+	uint64_t addr;
 
 	/* walk over the address space and update the page tables */
 	for (addr = start; addr < end; ) {
 		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
 		struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
 		unsigned nptes;
-		uint64_t pte;
-		int r;
-
-		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
-		r = reservation_object_reserve_shared(pt->tbo.resv);
-		if (r)
-			return r;
+		uint64_t pe_start;
 
 		if ((addr & ~mask) == (end & ~mask))
 			nptes = end - addr;
 		else
 			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 
-		pte = amdgpu_bo_gpu_offset(pt);
-		pte += (addr & mask) * 8;
+		pe_start = amdgpu_bo_gpu_offset(pt);
+		pe_start += (addr & mask) * 8;
 
-		if ((last_pte + 8 * count) != pte) {
+		if (last_pe_end != pe_start) {
 
-			if (count) {
-				amdgpu_vm_frag_ptes(adev, ib, last_pte,
-						    last_pte + 8 * count,
-						    last_dst, flags,
-						    gtt_flags);
-			}
+			amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+					    last_pe_start, last_pe_end,
+					    last_dst, flags);
 
-			count = nptes;
-			last_pte = pte;
+			last_pe_start = pe_start;
+			last_pe_end = pe_start + 8 * nptes;
 			last_dst = dst;
 		} else {
-			count += nptes;
+			last_pe_end += 8 * nptes;
 		}
 
 		addr += nptes;
 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 	}
 
-	if (count) {
-		amdgpu_vm_frag_ptes(adev, ib, last_pte,
-				    last_pte + 8 * count,
-				    last_dst, flags, gtt_flags);
-	}
-
-	return 0;
+	amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+			    last_pe_start, last_pe_end,
+			    last_dst, flags);
 }
 
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
+ * @gtt: GART instance to use for mapping
  * @gtt_flags: flags as they are used for GTT
+ * @vm: requested vm
+ * @start: start of mapped range
+ * @last: last mapped entry
+ * @flags: flags for the entries
+ * @addr: addr to set the area to
  * @fence: optional resulting fence
  *
- * Fill in the page table entries for @mapping.
+ * Fill in the page table entries between @start and @last.
  * Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+				       struct amdgpu_gart *gtt,
+				       uint32_t gtt_flags,
 				       struct amdgpu_vm *vm,
-				       struct amdgpu_bo_va_mapping *mapping,
-				       uint64_t addr, uint32_t gtt_flags,
+				       uint64_t start, uint64_t last,
+				       uint32_t flags, uint64_t addr,
 				       struct fence **fence)
 {
-	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+	struct amdgpu_ring *ring;
+	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
-	uint32_t flags = gtt_flags;
+	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct fence *f = NULL;
 	int r;
 
-	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
-	 * but in case of something, we filter the flags in first place
-	 */
-	if (!(mapping->flags & AMDGPU_PTE_READABLE))
-		flags &= ~AMDGPU_PTE_READABLE;
-	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
-		flags &= ~AMDGPU_PTE_WRITEABLE;
+	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
-	trace_amdgpu_vm_bo_update(mapping);
+	/* sync to everything on unmapping */
+	if (!(flags & AMDGPU_PTE_VALID))
+		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
 
-	nptes = mapping->it.last - mapping->it.start + 1;
+	nptes = last - start + 1;
 
 	/*
 	 * reserve space for one command every (1 << BLOCK_SIZE)
@@ -761,11 +762,11 @@
 	/* padding, etc. */
 	ndw = 64;
 
-	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
+	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
 
-	} else if (flags & AMDGPU_PTE_SYSTEM) {
+	} else if (gtt) {
 		/* header for write data commands */
 		ndw += ncmds * 4;
 
@@ -780,38 +781,28 @@
 		ndw += 2 * 10;
 	}
 
-	/* update too big for an IB */
-	if (ndw > 0xfffff)
-		return -ENOMEM;
-
-	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!ib)
-		return -ENOMEM;
-
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-	if (r) {
-		kfree(ib);
+	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+	if (r)
 		return r;
-	}
 
-	ib->length_dw = 0;
+	ib = &job->ibs[0];
 
-	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
-				  mapping->it.last + 1, addr + mapping->offset,
-				  flags, gtt_flags);
+	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+			     owner);
+	if (r)
+		goto error_free;
 
-	if (r) {
-		amdgpu_ib_free(adev, ib);
-		kfree(ib);
-		return r;
-	}
+	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
+	if (r)
+		goto error_free;
 
-	amdgpu_vm_pad_ib(adev, ib);
+	amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
+			      addr, flags);
+
+	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > ndw);
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-						 &amdgpu_vm_free_job,
-						 AMDGPU_FENCE_OWNER_VM,
-						 &f);
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
 		goto error_free;
 
@@ -821,19 +812,76 @@
 		*fence = fence_get(f);
 	}
 	fence_put(f);
-	if (!amdgpu_enable_scheduler) {
-		amdgpu_ib_free(adev, ib);
-		kfree(ib);
-	}
 	return 0;
 
 error_free:
-	amdgpu_ib_free(adev, ib);
-	kfree(ib);
+	amdgpu_job_free(job);
 	return r;
 }
 
 /**
+ * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
+ *
+ * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @vm: requested vm
+ * @mapping: mapped range and flags to use for the update
+ * @addr: addr to set the area to
+ * @gtt_flags: flags as they are used for GTT
+ * @fence: optional resulting fence
+ *
+ * Split the mapping into smaller chunks so that each update fits
+ * into an SDMA IB.
+ * Returns 0 for success, -EINVAL for failure.
+ */
+static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+				      struct amdgpu_gart *gtt,
+				      uint32_t gtt_flags,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo_va_mapping *mapping,
+				      uint64_t addr, struct fence **fence)
+{
+	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
+
+	uint64_t start = mapping->it.start;
+	uint32_t flags = gtt_flags;
+	int r;
+
+	/* normally bo_va->flags only contains the READABLE and WRITEABLE bits,
+	 * but just in case we filter the flags here first
+	 */
+	if (!(mapping->flags & AMDGPU_PTE_READABLE))
+		flags &= ~AMDGPU_PTE_READABLE;
+	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+		flags &= ~AMDGPU_PTE_WRITEABLE;
+
+	trace_amdgpu_vm_bo_update(mapping);
+
+	addr += mapping->offset;
+
+	if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
+		return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+						   start, mapping->it.last,
+						   flags, addr, fence);
+
+	while (start != mapping->it.last + 1) {
+		uint64_t last;
+
+		last = min((uint64_t)mapping->it.last, start + max_size - 1);
+		r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+						start, last, flags, addr,
+						fence);
+		if (r)
+			return r;
+
+		start = last + 1;
+		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+/**
  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
  *
  * @adev: amdgpu_device pointer
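
amdgpu_vm_bo_split_mapping() caps each page-table update at 64MB worth of pages so that every chunk fits in one SDMA IB. The chunking loop in isolation (page size and mapping length here are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096ULL

    int main(void)
    {
        /* split [start, last] into chunks of at most max_size pages,
         * mirroring the loop in amdgpu_vm_bo_split_mapping() */
        const uint64_t max_size = 64ULL * 1024 * 1024 / GPU_PAGE_SIZE;
        uint64_t start = 0, last = 40000;   /* mapping length, in pages */
        uint64_t addr = 0x100000;

        while (start != last + 1) {
            uint64_t end = last < start + max_size - 1
                         ? last : start + max_size - 1;

            printf("update %llu-%llu -> %#llx\n",
                   (unsigned long long)start, (unsigned long long)end,
                   (unsigned long long)addr);
            start = end + 1;
            addr += max_size * GPU_PAGE_SIZE;
        }
        return 0;
    }
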
@@ -851,14 +899,25 @@
 {
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
+	struct amdgpu_gart *gtt = NULL;
 	uint32_t flags;
 	uint64_t addr;
 	int r;
 
 	if (mem) {
 		addr = (u64)mem->start << PAGE_SHIFT;
-		if (mem->mem_type != TTM_PL_TT)
+		switch (mem->mem_type) {
+		case TTM_PL_TT:
+			gtt = &bo_va->bo->adev->gart;
+			break;
+
+		case TTM_PL_VRAM:
 			addr += adev->vm_manager.vram_base_offset;
+			break;
+
+		default:
+			break;
+		}
 	} else {
 		addr = 0;
 	}
@@ -871,8 +930,8 @@
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
-						flags, &bo_va->last_pt_update);
+		r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
+					       &bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -912,21 +971,18 @@
 	struct amdgpu_bo_va_mapping *mapping;
 	int r;
 
-	spin_lock(&vm->freed_lock);
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
-		spin_unlock(&vm->freed_lock);
-		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
+
+		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
+					       0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
 
-		spin_lock(&vm->freed_lock);
 	}
-	spin_unlock(&vm->freed_lock);
-
 	return 0;
 
 }
@@ -953,9 +1009,8 @@
 		bo_va = list_first_entry(&vm->invalidated,
 			struct amdgpu_bo_va, vm_status);
 		spin_unlock(&vm->status_lock);
-		mutex_lock(&bo_va->mutex);
+
 		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
-		mutex_unlock(&bo_va->mutex);
 		if (r)
 			return r;
 
@@ -976,7 +1031,7 @@
  * @vm: requested vm
  * @bo: amdgpu buffer object
  *
- * Add @bo into the requested vm (cayman+).
+ * Add @bo into the requested vm.
  * Add @bo to the list of bos associated with the vm
  * Returns newly added bo_va or NULL for failure
  *
@@ -999,7 +1054,7 @@
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
-	mutex_init(&bo_va->mutex);
+
 	list_add_tail(&bo_va->bo_list, &bo->va);
 
 	return bo_va;
@@ -1051,9 +1106,7 @@
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
-	spin_lock(&vm->it_lock);
 	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
-	spin_unlock(&vm->it_lock);
 	if (it) {
 		struct amdgpu_bo_va_mapping *tmp;
 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1077,13 +1130,8 @@
 	mapping->offset = offset;
 	mapping->flags = flags;
 
-	mutex_lock(&bo_va->mutex);
 	list_add(&mapping->list, &bo_va->invalids);
-	mutex_unlock(&bo_va->mutex);
-	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
-	spin_unlock(&vm->it_lock);
-	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
 	/* Make sure the page tables are allocated */
 	saddr >>= amdgpu_vm_block_size;
@@ -1117,18 +1165,17 @@
 		 */
 		pt->parent = amdgpu_bo_ref(vm->page_directory);
 
-		r = amdgpu_vm_clear_bo(adev, pt);
+		r = amdgpu_vm_clear_bo(adev, vm, pt);
 		if (r) {
 			amdgpu_bo_unref(&pt);
 			goto error_free;
 		}
 
 		entry->robj = pt;
-		entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
-		entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
 		entry->priority = 0;
 		entry->tv.bo = &entry->robj->tbo;
 		entry->tv.shared = true;
+		entry->user_pages = NULL;
 		vm->page_tables[pt_idx].addr = 0;
 	}
 
@@ -1136,9 +1183,7 @@
 
 error_free:
 	list_del(&mapping->list);
-	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
-	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	kfree(mapping);
 
@@ -1167,7 +1212,7 @@
 	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-	mutex_lock(&bo_va->mutex);
+
 	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
@@ -1181,25 +1226,18 @@
 				break;
 		}
 
-		if (&mapping->list == &bo_va->invalids) {
-			mutex_unlock(&bo_va->mutex);
+		if (&mapping->list == &bo_va->invalids)
 			return -ENOENT;
-		}
 	}
-	mutex_unlock(&bo_va->mutex);
+
 	list_del(&mapping->list);
-	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
-	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (valid) {
-		spin_lock(&vm->freed_lock);
+	if (valid)
 		list_add(&mapping->list, &vm->freed);
-		spin_unlock(&vm->freed_lock);
-	} else {
+	else
 		kfree(mapping);
-	}
 
 	return 0;
 }
@@ -1210,7 +1248,7 @@
  * @adev: amdgpu_device pointer
  * @bo_va: requested bo_va
  *
- * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the requested vm.
  *
  * Object have to be reserved!
  */
@@ -1228,23 +1266,17 @@
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
-		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
-		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
-		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
-		spin_unlock(&vm->freed_lock);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
-		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
-		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
+
 	fence_put(bo_va->last_pt_update);
-	mutex_destroy(&bo_va->mutex);
 	kfree(bo_va);
 }
 
@@ -1255,7 +1287,7 @@
  * @vm: requested vm
  * @bo: amdgpu buffer object
  *
- * Mark @bo as invalid (cayman+).
+ * Mark @bo as invalid.
  */
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo)
@@ -1276,17 +1308,20 @@
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  *
- * Init @vm fields (cayman+).
+ * Init @vm fields.
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
 		AMDGPU_VM_PTE_COUNT * 8);
 	unsigned pd_size, pd_entries;
+	unsigned ring_instance;
+	struct amdgpu_ring *ring;
+	struct amd_sched_rq *rq;
 	int i, r;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		vm->ids[i].id = 0;
+		vm->ids[i].mgr_id = NULL;
 		vm->ids[i].flushed_updates = NULL;
 	}
 	vm->va = RB_ROOT;
@@ -1294,8 +1329,7 @@
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
-	spin_lock_init(&vm->it_lock);
-	spin_lock_init(&vm->freed_lock);
+
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1306,6 +1340,17 @@
 		return -ENOMEM;
 	}
 
+	/* create scheduler entity for page table updates */
+
+	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
+	ring_instance %= adev->vm_manager.vm_pte_num_rings;
+	ring = adev->vm_manager.vm_pte_rings[ring_instance];
+	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+	r = amd_sched_entity_init(&ring->sched, &vm->entity,
+				  rq, amdgpu_sched_jobs);
+	if (r)
+		return r;
+
 	vm->page_directory_fence = NULL;
 
 	r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1313,22 +1358,27 @@
 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 			     NULL, NULL, &vm->page_directory);
 	if (r)
-		return r;
+		goto error_free_sched_entity;
+
 	r = amdgpu_bo_reserve(vm->page_directory, false);
-	if (r) {
-		amdgpu_bo_unref(&vm->page_directory);
-		vm->page_directory = NULL;
-		return r;
-	}
-	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+	if (r)
+		goto error_free_page_directory;
+
+	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
 	amdgpu_bo_unreserve(vm->page_directory);
-	if (r) {
-		amdgpu_bo_unref(&vm->page_directory);
-		vm->page_directory = NULL;
-		return r;
-	}
+	if (r)
+		goto error_free_page_directory;
 
 	return 0;
+
+error_free_page_directory:
+	amdgpu_bo_unref(&vm->page_directory);
+	vm->page_directory = NULL;
+
+error_free_sched_entity:
+	amd_sched_entity_fini(&ring->sched, &vm->entity);
+
+	return r;
 }
 
 /**
@@ -1337,7 +1387,7 @@
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  *
- * Tear down @vm (cayman+).
+ * Tear down @vm.
  * Unbind the VM and remove all bos from the vm bo list
  */
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
@@ -1345,6 +1395,8 @@
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
 	int i;
 
+	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+
 	if (!RB_EMPTY_ROOT(&vm->va)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
 	}
@@ -1364,14 +1416,38 @@
 
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		unsigned id = vm->ids[i].id;
 
-		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
-				    (long)vm, 0);
-		fence_put(vm->ids[i].flushed_updates);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_vm_id *id = &vm->ids[i];
+
+		if (id->mgr_id)
+			atomic_long_cmpxchg(&id->mgr_id->owner,
+					    (long)id, 0);
+		fence_put(id->flushed_updates);
+	}
+}
+
+/**
+ * amdgpu_vm_manager_init - init the VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+
+	/* skip over VMID 0, since it is the system VM */
+	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
+		amdgpu_vm_reset_id(adev, i);
+		list_add_tail(&adev->vm_manager.ids[i].list,
+			      &adev->vm_manager.ids_lru);
 	}
 
+	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 }
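
The init above starts the walk at 1 because VMID 0 belongs to the system
VM; every other ID is reset and queued oldest-first for reuse. A
standalone sketch of that seeding, with a plain array standing in for
the kernel's list_head-based LRU (names hypothetical):

    #include <stdio.h>

    #define NUM_IDS 8

    int main(void)
    {
            unsigned lru[NUM_IDS];
            unsigned n = 0;

            /* skip over VMID 0, since it is reserved for the system VM */
            for (unsigned i = 1; i < NUM_IDS; i++)
                    lru[n++] = i;   /* oldest entries sit at the front */

            printf("%u ids available, first candidate: %u\n", n, lru[0]);
            return 0;
    }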
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 474ca02..1f9109d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -3017,7 +3017,6 @@
 						      &memory_level->MinVddcPhases);
 
 	memory_level->EnabledForThrottle = 1;
-	memory_level->EnabledForActivity = 1;
 	memory_level->UpH = 0;
 	memory_level->DownH = 100;
 	memory_level->VoltageDownH = 0;
@@ -3376,7 +3375,6 @@
 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
-	graphic_level->EnabledForActivity = 1;
 
 	return 0;
 }
@@ -3407,6 +3405,7 @@
 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
 				PPSMC_DISPLAY_WATERMARK_HIGH;
 	}
+	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
 
 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
@@ -3450,6 +3449,8 @@
 			return ret;
 	}
 
+	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
 	if ((dpm_table->mclk_table.count >= 2) &&
 	    ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
 		pi->smc_state_table.MemoryLevel[1].MinVddc =
@@ -4381,26 +4382,6 @@
 				}
 			}
 		}
-		if ((!pi->pcie_dpm_key_disabled) &&
-		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
-			levels = 0;
-			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
-			while (tmp >>= 1)
-				levels++;
-			if (levels) {
-				ret = ci_dpm_force_state_pcie(adev, level);
-				if (ret)
-					return ret;
-				for (i = 0; i < adev->usec_timeout; i++) {
-					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
-					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
-					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
-					if (tmp == levels)
-						break;
-					udelay(1);
-				}
-			}
-		}
 	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
 		if ((!pi->sclk_dpm_key_disabled) &&
 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
@@ -5395,30 +5376,6 @@
 
 	ci_update_current_ps(adev, boot_ps);
 
-	if (adev->irq.installed &&
-	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
-#if 0
-		PPSMC_Result result;
-#endif
-		ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
-						       CISLANDS_TEMP_RANGE_MAX);
-		if (ret) {
-			DRM_ERROR("ci_thermal_set_temperature_range failed\n");
-			return ret;
-		}
-		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
-			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
-		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
-			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
-
-#if 0
-		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
-
-		if (result != PPSMC_Result_OK)
-			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-#endif
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 155965e..bddc9ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1059,257 +1059,6 @@
 	return -EINVAL;
 }
 
-static void cik_print_gpu_status_regs(struct amdgpu_device *adev)
-{
-	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
-		RREG32(mmGRBM_STATUS));
-	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
-		RREG32(mmGRBM_STATUS2));
-	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE0));
-	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE1));
-	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE2));
-	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE3));
-	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
-		RREG32(mmSRBM_STATUS));
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		RREG32(mmSRBM_STATUS2));
-	dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
-		RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
-	dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
-		 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
-	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
-	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT2));
-	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT3));
-	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
-		 RREG32(mmCP_CPF_BUSY_STAT));
-	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_CPF_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
-	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
-	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_CPC_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
-}
-
-/**
- * cik_gpu_check_soft_reset - check which blocks are busy
- *
- * @adev: amdgpu_device pointer
- *
- * Check which blocks are busy and return the relevant reset
- * mask to be used by cik_gpu_soft_reset().
- * Returns a mask of the blocks to be reset.
- */
-u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev)
-{
-	u32 reset_mask = 0;
-	u32 tmp;
-
-	/* GRBM_STATUS */
-	tmp = RREG32(mmGRBM_STATUS);
-	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
-		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
-		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
-		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
-		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
-		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
-		reset_mask |= AMDGPU_RESET_GFX;
-
-	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
-		reset_mask |= AMDGPU_RESET_CP;
-
-	/* GRBM_STATUS2 */
-	tmp = RREG32(mmGRBM_STATUS2);
-	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_RLC;
-
-	/* SDMA0_STATUS_REG */
-	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
-	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
-		reset_mask |= AMDGPU_RESET_DMA;
-
-	/* SDMA1_STATUS_REG */
-	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
-	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
-		reset_mask |= AMDGPU_RESET_DMA1;
-
-	/* SRBM_STATUS2 */
-	tmp = RREG32(mmSRBM_STATUS2);
-	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_DMA;
-
-	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_DMA1;
-
-	/* SRBM_STATUS */
-	tmp = RREG32(mmSRBM_STATUS);
-
-	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_IH;
-
-	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_SEM;
-
-	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
-		reset_mask |= AMDGPU_RESET_GRBM;
-
-	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_VMC;
-
-	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
-		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
-		reset_mask |= AMDGPU_RESET_MC;
-
-	if (amdgpu_display_is_display_hung(adev))
-		reset_mask |= AMDGPU_RESET_DISPLAY;
-
-	/* Skip MC reset as it's mostly likely not hung, just busy */
-	if (reset_mask & AMDGPU_RESET_MC) {
-		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
-		reset_mask &= ~AMDGPU_RESET_MC;
-	}
-
-	return reset_mask;
-}
-
-/**
- * cik_gpu_soft_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- * @reset_mask: mask of which blocks to reset
- *
- * Soft reset the blocks specified in @reset_mask.
- */
-static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
-{
-	struct amdgpu_mode_mc_save save;
-	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
-	u32 tmp;
-
-	if (reset_mask == 0)
-		return;
-
-	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
-
-	cik_print_gpu_status_regs(adev);
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
-
-	/* disable CG/PG */
-
-	/* stop the rlc */
-	gfx_v7_0_rlc_stop(adev);
-
-	/* Disable GFX parsing/prefetching */
-	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
-
-	/* Disable MEC parsing/prefetching */
-	WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
-
-	if (reset_mask & AMDGPU_RESET_DMA) {
-		/* sdma0 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-		tmp |= SDMA0_F32_CNTL__HALT_MASK;
-		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-	}
-	if (reset_mask & AMDGPU_RESET_DMA1) {
-		/* sdma1 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-		tmp |= SDMA0_F32_CNTL__HALT_MASK;
-		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-	}
-
-	gmc_v7_0_mc_stop(adev, &save);
-	if (amdgpu_asic_wait_for_mc_idle(adev)) {
-		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-	}
-
-	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP))
-		grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
-			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
-
-	if (reset_mask & AMDGPU_RESET_CP) {
-		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
-
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
-	}
-
-	if (reset_mask & AMDGPU_RESET_DMA)
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-
-	if (reset_mask & AMDGPU_RESET_DMA1)
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
-
-	if (reset_mask & AMDGPU_RESET_DISPLAY)
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-
-	if (reset_mask & AMDGPU_RESET_RLC)
-		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
-
-	if (reset_mask & AMDGPU_RESET_SEM)
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK;
-
-	if (reset_mask & AMDGPU_RESET_IH)
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
-
-	if (reset_mask & AMDGPU_RESET_GRBM)
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
-
-	if (reset_mask & AMDGPU_RESET_VMC)
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
-
-	if (!(adev->flags & AMD_IS_APU)) {
-		if (reset_mask & AMDGPU_RESET_MC)
-			srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
-	}
-
-	if (grbm_soft_reset) {
-		tmp = RREG32(mmGRBM_SOFT_RESET);
-		tmp |= grbm_soft_reset;
-		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
-		WREG32(mmGRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmGRBM_SOFT_RESET);
-
-		udelay(50);
-
-		tmp &= ~grbm_soft_reset;
-		WREG32(mmGRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmGRBM_SOFT_RESET);
-	}
-
-	if (srbm_soft_reset) {
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-		tmp |= srbm_soft_reset;
-		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
-		WREG32(mmSRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-
-		udelay(50);
-
-		tmp &= ~srbm_soft_reset;
-		WREG32(mmSRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-	}
-
-	/* Wait a little for things to settle down */
-	udelay(50);
-
-	gmc_v7_0_mc_resume(adev, &save);
-	udelay(50);
-
-	cik_print_gpu_status_regs(adev);
-}
-
 struct kv_reset_save_regs {
 	u32 gmcon_reng_execute;
 	u32 gmcon_misc;
@@ -1405,45 +1154,11 @@
 
 static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 {
-	struct amdgpu_mode_mc_save save;
 	struct kv_reset_save_regs kv_save = { 0 };
-	u32 tmp, i;
+	u32 i;
 
 	dev_info(adev->dev, "GPU pci config reset\n");
 
-	/* disable dpm? */
-
-	/* disable cg/pg */
-
-	/* Disable GFX parsing/prefetching */
-	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
-		CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
-
-	/* Disable MEC parsing/prefetching */
-	WREG32(mmCP_MEC_CNTL,
-			CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
-
-	/* sdma0 */
-	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-	tmp |= SDMA0_F32_CNTL__HALT_MASK;
-	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-	/* sdma1 */
-	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-	tmp |= SDMA0_F32_CNTL__HALT_MASK;
-	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-	/* XXX other engines? */
-
-	/* halt the rlc, disable cp internal ints */
-	gfx_v7_0_rlc_stop(adev);
-
-	udelay(50);
-
-	/* disable mem access */
-	gmc_v7_0_mc_stop(adev, &save);
-	if (amdgpu_asic_wait_for_mc_idle(adev)) {
-		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
-	}
-
 	if (adev->flags & AMD_IS_APU)
 		kv_save_regs_for_reset(adev, &kv_save);
 
@@ -1489,26 +1204,11 @@
  */
 static int cik_asic_reset(struct amdgpu_device *adev)
 {
-	u32 reset_mask;
+	cik_set_bios_scratch_engine_hung(adev, true);
 
-	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
+	cik_gpu_pci_config_reset(adev);
 
-	if (reset_mask)
-		cik_set_bios_scratch_engine_hung(adev, true);
-
-	/* try soft reset */
-	cik_gpu_soft_reset(adev, reset_mask);
-
-	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
-
-	/* try pci config reset */
-	if (reset_mask && amdgpu_hard_reset)
-		cik_gpu_pci_config_reset(adev);
-
-	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
-
-	if (!reset_mask)
-		cik_set_bios_scratch_engine_hung(adev, false);
+	cik_set_bios_scratch_engine_hung(adev, false);
 
 	return 0;
 }
@@ -2328,8 +2028,6 @@
 
 	adev->asic_funcs = &cik_asic_funcs;
 
-	adev->has_uvd = true;
-
 	adev->rev_id = cik_get_rev_id(adev);
 	adev->external_rev_id = 0xFF;
 	switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c55ecf0..d3ac329 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -212,7 +212,7 @@
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 			   struct amdgpu_ib *ib)
 {
-	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 extra_bits = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 4)
@@ -261,6 +261,13 @@
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
+static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	amdgpu_ring_write(ring, mmHDP_DEBUG0);
+	amdgpu_ring_write(ring, 1);
+}
+
 /**
  * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -295,30 +302,6 @@
 }
 
 /**
- * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (CIK).
- */
-static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	u64 addr = semaphore->gpu_addr;
-	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
-
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
-	amdgpu_ring_write(ring, addr & 0xfffffff8);
-	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
-
-	return true;
-}
-
-/**
  * cik_sdma_gfx_stop - stop the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
@@ -417,6 +400,9 @@
 		cik_srbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 
+		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
+		       adev->gfx.config.gb_addr_config & 0x70);
+
 		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
 		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 
@@ -584,7 +570,7 @@
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
-	r = amdgpu_ring_lock(ring, 5);
+	r = amdgpu_ring_alloc(ring, 5);
 	if (r) {
 		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 		amdgpu_wb_free(adev, index);
@@ -595,7 +581,7 @@
 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
 	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -645,7 +631,7 @@
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(ring, NULL, 256, &ib);
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		goto err0;
@@ -657,9 +643,7 @@
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err1;
 
@@ -685,7 +669,8 @@
 
 err1:
 	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;
@@ -738,7 +723,7 @@
  * Update PTEs by writing them manually using sDMA (CIK).
  */
 static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
-				  uint64_t pe,
+				  const dma_addr_t *pages_addr, uint64_t pe,
 				  uint64_t addr, unsigned count,
 				  uint32_t incr, uint32_t flags)
 {
@@ -757,14 +742,7 @@
 		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 		ib->ptr[ib->length_dw++] = ndw;
 		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			if (flags & AMDGPU_PTE_SYSTEM) {
-				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
-			} else if (flags & AMDGPU_PTE_VALID) {
-				value = addr;
-			} else {
-				value = 0;
-			}
+			value = amdgpu_vm_map_gart(pages_addr, addr);
 			addr += incr;
 			value |= flags;
 			ib->ptr[ib->length_dw++] = value;
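
With the special-casing gone, every address goes through
amdgpu_vm_map_gart() and the flag bits are ORed in before the 64-bit
entry is emitted. A standalone sketch of the packing loop, under the
simplifying assumption that the mapped address is already page-aligned
(no real GART lookup; constants hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_VALID 0x1ULL
    #define PAGE_SIZE 4096ULL

    int main(void)
    {
            uint64_t ib[8];
            unsigned ndw = 0;
            uint64_t addr = 0x100000, flags = PTE_VALID;

            for (int count = 4; count > 0; --count) {
                    /* page-aligned address, low bits carry the flags */
                    uint64_t value = addr & ~(PAGE_SIZE - 1);

                    value |= flags;
                    ib[ndw++] = value;
                    addr += PAGE_SIZE;   /* the 'incr' step in the driver */
            }

            for (unsigned i = 0; i < ndw; i++)
                    printf("pte[%u] = 0x%llx\n", i,
                           (unsigned long long)ib[i]);
            return 0;
    }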
@@ -827,9 +805,9 @@
  * @ib: indirect buffer to fill with padding
  *
  */
-static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
+static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	u32 pad_count;
 	int i;
 
@@ -845,6 +823,30 @@
 }
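
cik_sdma pads every IB out to an 8-dword boundary with NOP packets so
the engine always fetches aligned packets; the ring-based signature
above just lets the pad helper reach the SDMA instance through the ring
instead of through the IB. A standalone sketch of the padding step (the
NOP encoding here is a placeholder, not the real SDMA opcode):

    #include <stdint.h>
    #include <stdio.h>

    #define SDMA_NOP  0x0u   /* placeholder NOP packet */
    #define ALIGN_DW  8u

    int main(void)
    {
            uint32_t ib[16];
            unsigned len = 5;   /* dwords already written */

            /* pad to the next multiple of ALIGN_DW */
            unsigned pad = (ALIGN_DW - (len % ALIGN_DW)) % ALIGN_DW;

            for (unsigned i = 0; i < pad; i++)
                    ib[len++] = SDMA_NOP;

            printf("length after padding: %u dwords\n", len);
            return 0;
    }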
 
 /**
+ * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (CIK).
+ */
+static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	/* wait for idle */
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
+					    SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+					    SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
+					    SDMA_POLL_REG_MEM_EXTRA_M));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq); /* reference */
+	amdgpu_ring_write(ring, 0xfffffff); /* mask */
+	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
+}
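
The POLL_REG_MEM packet above makes the SDMA engine spin on the fence
address until the value it reads back, masked, equals sync_seq, with the
retry count and poll interval as a safety net. The CPU-side equivalent
of that wait, sketched in standalone C (a volatile load standing in for
the engine's memory poll):

    #include <stdint.h>
    #include <stdio.h>

    /* Poll *addr until (*addr & mask) == ref or retries run out;
     * mirrors the reference/mask/retry fields of the packet. */
    static int poll_mem(volatile uint32_t *addr, uint32_t ref,
                        uint32_t mask, unsigned retries)
    {
            while (retries--) {
                    if ((*addr & mask) == ref)
                            return 0;   /* fence reached the target seq */
            }
            return -1;                  /* timed out */
    }

    int main(void)
    {
            volatile uint32_t fence = 41;

            fence = 42;   /* pretend the GPU signalled the fence */
            printf("wait: %d\n", poll_mem(&fence, 42, 0xfffffff, 0xfff));
            return 0;
    }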
+
+/**
  * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
  *
  * @ring: amdgpu_ring pointer
@@ -1097,6 +1099,8 @@
 			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
 		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
 			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
+			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
 		mutex_lock(&adev->srbm_mutex);
 		for (j = 0; j < 16; j++) {
 			cik_srbm_select(adev, 0, 0, 0, j);
@@ -1297,12 +1301,14 @@
 	.parse_cs = NULL,
 	.emit_ib = cik_sdma_ring_emit_ib,
 	.emit_fence = cik_sdma_ring_emit_fence,
-	.emit_semaphore = cik_sdma_ring_emit_semaphore,
+	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
 	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
 	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
 	.test_ring = cik_sdma_ring_test_ring,
 	.test_ib = cik_sdma_ring_test_ib,
 	.insert_nop = cik_sdma_ring_insert_nop,
+	.pad_ib = cik_sdma_ring_pad_ib,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1399,14 +1405,18 @@
 	.copy_pte = cik_sdma_vm_copy_pte,
 	.write_pte = cik_sdma_vm_write_pte,
 	.set_pte_pde = cik_sdma_vm_set_pte_pde,
-	.pad_ib = cik_sdma_vm_pad_ib,
 };
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
+	unsigned i;
+
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
-		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			adev->vm_manager.vm_pte_rings[i] =
+				&adev->sdma.instance[i].ring;
+
+		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
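
With every SDMA instance now registered as a page-table ring,
amdgpu_vm_init() (earlier in this series) spreads VMs across them with
an atomic counter taken modulo vm_pte_num_rings. A standalone sketch of
that round-robin pick using C11 atomics (names hypothetical; the kernel
uses atomic_inc_return, which increments first, but the modulo spread is
the same):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_RINGS 2

    static atomic_uint next_ring;

    /* Each new VM grabs the next ring in a lock-free round-robin. */
    static unsigned pick_ring(void)
    {
            return atomic_fetch_add(&next_ring, 1) % NUM_RINGS;
    }

    int main(void)
    {
            for (int vm = 0; vm < 5; vm++)
                    printf("vm %d -> sdma ring %u\n", vm, pick_ring());
            return 0;
    }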
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 7f6d457..60d4493 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -46,9 +46,6 @@
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003
 
-#define CIK_RB_BITMAP_WIDTH_PER_SH     2
-#define HAWAII_RB_BITMAP_WIDTH_PER_SH  4
-
 #define AMDGPU_NUM_OF_VMIDS	8
 
 #define		PIPEID(x)					((x) << 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 093599a..6de2ce53 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1668,6 +1668,9 @@
 {
 	int i;
 
+	if (!amdgpu_audio)
+		return;
+
 	if (!adev->mode_info.audio.enabled)
 		return;
 
@@ -1973,7 +1976,7 @@
 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
 }
 
-static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
 {
 	int i;
 
@@ -1986,8 +1989,16 @@
 		if (adev->mode_info.afmt[i]) {
 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
 			adev->mode_info.afmt[i]->id = i;
+		} else {
+			int j;
+			for (j = 0; j < i; j++) {
+				kfree(adev->mode_info.afmt[j]);
+				adev->mode_info.afmt[j] = NULL;
+			}
+			return -ENOMEM;
 		}
 	}
+	return 0;
 }
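
The new error path above handles a partial allocation: when the i-th
kzalloc() fails, every slot allocated before it is freed and NULLed
before returning -ENOMEM, so afmt_fini() never sees a half-built array.
The identical rollback appears again in dce_v11_0.c and dce_v8_0.c
below. The same pattern in standalone C (hypothetical struct, malloc in
place of kzalloc):

    #include <stdlib.h>

    #define NUM_AFMT 6

    struct afmt { int id; };

    static struct afmt *afmt[NUM_AFMT];

    static int afmt_init(void)
    {
            for (int i = 0; i < NUM_AFMT; i++) {
                    afmt[i] = calloc(1, sizeof(*afmt[i]));
                    if (!afmt[i]) {
                            /* roll back every earlier slot before bailing */
                            for (int j = 0; j < i; j++) {
                                    free(afmt[j]);
                                    afmt[j] = NULL;
                            }
                            return -1;   /* -ENOMEM in the driver */
                    }
                    afmt[i]->id = i;
            }
            return 0;
    }

    int main(void)
    {
            return afmt_init();
    }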
 
 static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
@@ -2064,8 +2075,7 @@
 	if (atomic) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		target_fb = fb;
-	}
-	else {
+	} else {
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		target_fb = crtc->primary->fb;
 	}
@@ -2079,9 +2089,9 @@
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic)
+	if (atomic) {
 		fb_location = amdgpu_bo_gpu_offset(rbo);
-	else {
+	} else {
 		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(rbo);
@@ -2670,7 +2680,6 @@
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
 	drm_crtc_cleanup(crtc);
-	destroy_workqueue(amdgpu_crtc->pflip_queue);
 	kfree(amdgpu_crtc);
 }
 
@@ -2701,13 +2710,13 @@
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
 		dce_v10_0_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
 		if (amdgpu_crtc->enabled) {
 			dce_v10_0_vga_enable(crtc, true);
 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2890,7 +2899,6 @@
 
 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
-	amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
 	adev->mode_info.crtcs[index] = amdgpu_crtc;
 
 	amdgpu_crtc->max_cursor_width = 128;
@@ -2982,8 +2990,6 @@
 	if (r)
 		return r;
 
-	adev->mode_info.mode_config_initialized = true;
-
 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
 
 	adev->ddev->mode_config.max_width = 16384;
@@ -3014,7 +3020,9 @@
 		return -EINVAL;
 
 	/* setup afmt */
-	dce_v10_0_afmt_init(adev);
+	r = dce_v10_0_afmt_init(adev);
+	if (r)
+		return r;
 
 	r = dce_v10_0_audio_init(adev);
 	if (r)
@@ -3022,7 +3030,8 @@
 
 	drm_kms_helper_poll_init(adev->ddev);
 
-	return r;
+	adev->mode_info.mode_config_initialized = true;
+	return 0;
 }
 
 static int dce_v10_0_sw_fini(void *handle)
@@ -3366,7 +3375,7 @@
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+	schedule_work(&works->unpin_work);
 
 	return 0;
 }
@@ -3624,16 +3633,8 @@
 
 }
 
-static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
 	.dpms = dce_v10_0_ext_dpms,
-	.mode_fixup = dce_v10_0_ext_mode_fixup,
 	.prepare = dce_v10_0_ext_prepare,
 	.mode_set = dce_v10_0_ext_mode_set,
 	.commit = dce_v10_0_ext_commit,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 8e67249..e9ccc6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1658,6 +1658,9 @@
 {
 	int i;
 
+	if (!amdgpu_audio)
+		return;
+
 	if (!adev->mode_info.audio.enabled)
 		return;
 
@@ -1963,7 +1966,7 @@
 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
 }
 
-static void dce_v11_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
 {
 	int i;
 
@@ -1976,8 +1979,16 @@
 		if (adev->mode_info.afmt[i]) {
 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
 			adev->mode_info.afmt[i]->id = i;
+		} else {
+			int j;
+			for (j = 0; j < i; j++) {
+				kfree(adev->mode_info.afmt[j]);
+				adev->mode_info.afmt[j] = NULL;
+			}
+			return -ENOMEM;
 		}
 	}
+	return 0;
 }
 
 static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
@@ -2054,8 +2065,7 @@
 	if (atomic) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		target_fb = fb;
-	}
-	else {
+	} else {
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		target_fb = crtc->primary->fb;
 	}
@@ -2069,9 +2079,9 @@
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic)
+	if (atomic) {
 		fb_location = amdgpu_bo_gpu_offset(rbo);
-	else {
+	} else {
 		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(rbo);
@@ -2661,7 +2671,6 @@
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
 	drm_crtc_cleanup(crtc);
-	destroy_workqueue(amdgpu_crtc->pflip_queue);
 	kfree(amdgpu_crtc);
 }
 
@@ -2692,13 +2701,13 @@
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
 		dce_v11_0_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
 		if (amdgpu_crtc->enabled) {
 			dce_v11_0_vga_enable(crtc, true);
 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2881,7 +2890,6 @@
 
 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
-	amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
 	adev->mode_info.crtcs[index] = amdgpu_crtc;
 
 	amdgpu_crtc->max_cursor_width = 128;
@@ -2963,7 +2971,7 @@
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
 		if (r)
-		return r;
+			return r;
 	}
 
 	for (i = 8; i < 20; i += 2) {
@@ -2975,9 +2983,7 @@
 	/* HPD hotplug */
 	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
 	if (r)
-	return r;
-
-	adev->mode_info.mode_config_initialized = true;
+		return r;
 
 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
 
@@ -2996,6 +3002,7 @@
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
 
 	/* allocate crtcs */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		r = dce_v11_0_crtc_init(adev, i);
@@ -3009,7 +3016,9 @@
 		return -EINVAL;
 
 	/* setup afmt */
-	dce_v11_0_afmt_init(adev);
+	r = dce_v11_0_afmt_init(adev);
+	if (r)
+		return r;
 
 	r = dce_v11_0_audio_init(adev);
 	if (r)
@@ -3017,7 +3026,8 @@
 
 	drm_kms_helper_poll_init(adev->ddev);
 
-	return r;
+	adev->mode_info.mode_config_initialized = true;
+	return 0;
 }
 
 static int dce_v11_0_sw_fini(void *handle)
@@ -3361,7 +3371,7 @@
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+	schedule_work(&works->unpin_work);
 
 	return 0;
 }
@@ -3619,16 +3629,8 @@
 
 }
 
-static bool dce_v11_0_ext_mode_fixup(struct drm_encoder *encoder,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
 	.dpms = dce_v11_0_ext_dpms,
-	.mode_fixup = dce_v11_0_ext_mode_fixup,
 	.prepare = dce_v11_0_ext_prepare,
 	.mode_set = dce_v11_0_ext_mode_set,
 	.commit = dce_v11_0_ext_commit,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index d0e128c..e56b55d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1639,6 +1639,9 @@
 {
 	int i;
 
+	if (!amdgpu_audio)
+		return;
+
 	if (!adev->mode_info.audio.enabled)
 		return;
 
@@ -1910,7 +1913,7 @@
 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
 }
 
-static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
 {
 	int i;
 
@@ -1923,8 +1926,16 @@
 		if (adev->mode_info.afmt[i]) {
 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
 			adev->mode_info.afmt[i]->id = i;
+		} else {
+			int j;
+			for (j = 0; j < i; j++) {
+				kfree(adev->mode_info.afmt[j]);
+				adev->mode_info.afmt[j] = NULL;
+			}
+			return -ENOMEM;
 		}
 	}
+	return 0;
 }
 
 static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
@@ -2001,8 +2012,7 @@
 	if (atomic) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		target_fb = fb;
-	}
-	else {
+	} else {
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		target_fb = crtc->primary->fb;
 	}
@@ -2016,9 +2026,9 @@
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic)
+	if (atomic) {
 		fb_location = amdgpu_bo_gpu_offset(rbo);
-	else {
+	} else {
 		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(rbo);
@@ -2582,7 +2592,6 @@
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
 	drm_crtc_cleanup(crtc);
-	destroy_workqueue(amdgpu_crtc->pflip_queue);
 	kfree(amdgpu_crtc);
 }
 
@@ -2613,13 +2622,13 @@
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
 		dce_v8_0_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
 		if (amdgpu_crtc->enabled) {
 			dce_v8_0_vga_enable(crtc, true);
 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2809,7 +2818,6 @@
 
 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
-	amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
 	adev->mode_info.crtcs[index] = amdgpu_crtc;
 
 	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
@@ -2892,8 +2900,6 @@
 	if (r)
 		return r;
 
-	adev->mode_info.mode_config_initialized = true;
-
 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
 
 	adev->ddev->mode_config.max_width = 16384;
@@ -2924,7 +2930,9 @@
 		return -EINVAL;
 
 	/* setup afmt */
-	dce_v8_0_afmt_init(adev);
+	r = dce_v8_0_afmt_init(adev);
+	if (r)
+		return r;
 
 	r = dce_v8_0_audio_init(adev);
 	if (r)
@@ -2932,7 +2940,8 @@
 
 	drm_kms_helper_poll_init(adev->ddev);
 
-	return r;
+	adev->mode_info.mode_config_initialized = true;
+	return 0;
 }
 
 static int dce_v8_0_sw_fini(void *handle)
@@ -3375,7 +3384,7 @@
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+	schedule_work(&works->unpin_work);
 
 	return 0;
 }
@@ -3554,16 +3563,8 @@
 
 }
 
-static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
 	.dpms = dce_v8_0_ext_dpms,
-	.mode_fixup = dce_v8_0_ext_mode_fixup,
 	.prepare = dce_v8_0_ext_prepare,
 	.mode_set = dce_v8_0_ext_mode_set,
 	.commit = dce_v8_0_ext_commit,
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index e35340a..b336c91 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -272,6 +272,12 @@
 	if (!adev->pm.fw)
 		return -EINVAL;
 
+	/* Skip SMC ucode loading on SR-IOV capable boards.
+	 * vbios does this for us in asic_init in that case.
+	 */
+	if (adev->virtualization.supports_sr_iov)
+		return 0;
+
 	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
 	amdgpu_ucode_print_smc_hdr(&hdr->header);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 06602df..bb8709066 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -31,8 +31,6 @@
 #include "amdgpu_ucode.h"
 #include "clearstate_ci.h"
 
-#include "uvd/uvd_4_2_d.h"
-
 #include "dce/dce_8_0_d.h"
 #include "dce/dce_8_0_sh_mask.h"
 
@@ -1006,9 +1004,15 @@
  */
 static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
 {
-	const u32 num_tile_mode_states = 32;
-	const u32 num_secondary_tile_mode_states = 16;
-	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+	const u32 num_tile_mode_states =
+			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+	const u32 num_secondary_tile_mode_states =
+			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+	u32 reg_offset, split_equal_to_row_size;
+	uint32_t *tile, *macrotile;
+
+	tile = adev->gfx.config.tile_mode_array;
+	macrotile = adev->gfx.config.macrotile_mode_array;
 
 	switch (adev->gfx.config.mem_row_size_in_kb) {
 	case 1:
@@ -1023,832 +1027,531 @@
 		break;
 	}
 
+	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+		tile[reg_offset] = 0;
+	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+		macrotile[reg_offset] = 0;
+
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
-		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 1:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 2:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 3:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 4:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 5:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 6:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 7:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
+		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
+		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
 
-			case 8:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16));
-				break;
-			case 9:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
-				break;
-			case 10:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 11:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 12:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 13:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-				break;
-			case 14:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 15:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 16:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 17:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 18:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 19:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-				break;
-			case 20:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 21:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 22:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 23:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 24:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 25:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 26:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 27:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
-				break;
-			case 28:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 29:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 30:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
-		}
-		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 1:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 2:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 3:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 4:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 5:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 6:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			case 8:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 9:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 10:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 11:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 12:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 13:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 14:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
-		}
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			if (reg_offset != 7)
+				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
 		break;
 	case CHIP_HAWAII:
-		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 1:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 2:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 3:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 4:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 5:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 6:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 7:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						TILE_SPLIT(split_equal_to_row_size));
-				break;
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
+		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
 
-			case 8:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
-				break;
-			case 9:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
-				break;
-			case 10:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 11:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 12:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 13:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-				break;
-			case 14:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 15:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 16:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 17:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 18:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 19:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
-				break;
-			case 20:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 21:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 22:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 23:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 24:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 25:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 26:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 27:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
-				break;
-			case 28:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 29:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 30:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
-		}
-		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 1:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 2:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 3:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 4:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 5:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			case 6:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			case 8:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 9:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 10:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 11:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 12:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 13:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 14:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
-		}
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			if (reg_offset != 7)
+				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
 		break;
 	case CHIP_KABINI:
 	case CHIP_KAVERI:
 	case CHIP_MULLINS:
 	default:
-		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 1:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 2:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 3:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 4:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 5:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 6:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 7:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P2));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
+		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
+		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
+		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
 
-			case 8:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						PIPE_CONFIG(ADDR_SURF_P2));
-				break;
-			case 9:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
-				break;
-			case 10:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 11:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 12:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 13:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-				break;
-			case 14:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 15:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 16:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 17:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 18:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 19:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
-				break;
-			case 20:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 21:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 22:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 23:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 24:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 25:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 26:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
-				break;
-			case 27:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
-				break;
-			case 28:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 29:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						PIPE_CONFIG(ADDR_SURF_P2) |
-						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
-				break;
-			case 30:
-				gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
-		}
-		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 1:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 2:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 3:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 4:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 5:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 6:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 8:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 9:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 10:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 11:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 12:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 13:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 14:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
-		}
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			if (reg_offset != 7)
+				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
 		break;
 	}
 }
@@ -1893,45 +1596,31 @@
  */
 static u32 gfx_v7_0_create_bitmask(u32 bit_width)
 {
-	u32 i, mask = 0;
-
-	for (i = 0; i < bit_width; i++) {
-		mask <<= 1;
-		mask |= 1;
-	}
-	return mask;
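+	/* e.g. bit_width = 4 yields 0xf; using 1ULL keeps the shift defined when bit_width == 32 */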
+	return (u32)((1ULL << bit_width) - 1);
 }
 
 /**
- * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs
+ * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
  *
  * @adev: amdgpu_device pointer
- * @max_rb_num: max RBs (render backends) for the asic
- * @se_num: number of SEs (shader engines) for the asic
- * @sh_per_se: number of SH blocks per SE for the asic
  *
- * Calculates the bitmask of disabled RBs (CIK).
- * Returns the disabled RB bitmask.
+ * Calculates the bitmask of enabled RBs (CIK).
+ * Returns the enabled RB bitmask.
  */
-static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev,
-				    u32 max_rb_num_per_se,
-				    u32 sh_per_se)
+static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 {
 	u32 data, mask;
 
 	data = RREG32(mmCC_RB_BACKEND_DISABLE);
-	if (data & 1)
-		data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-	else
-		data = 0;
-
 	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
+	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
 
-	mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
+				       adev->gfx.config.max_sh_per_se);
 
-	return data & mask;
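+	/* the registers report disabled backends, so invert and trim to the RBs present per SH */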
+	return (~data) & mask;
 }
 
 /**
@@ -1940,73 +1629,31 @@
  * @adev: amdgpu_device pointer
  * @se_num: number of SEs (shader engines) for the asic
  * @sh_per_se: number of SH blocks per SE for the asic
- * @max_rb_num: max RBs (render backends) for the asic
  *
  * Configures per-SE/SH RB registers (CIK).
  */
-static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
-			      u32 se_num, u32 sh_per_se,
-			      u32 max_rb_num_per_se)
+static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
-	u32 data, mask;
-	u32 disabled_rbs = 0;
-	u32 enabled_rbs = 0;
+	u32 data;
+	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		for (j = 0; j < sh_per_se; j++) {
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v7_0_select_se_sh(adev, i, j);
-			data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
-			if (adev->asic_type == CHIP_HAWAII)
-				disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
-			else
-				disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
+			data = gfx_v7_0_get_rb_active_bitmap(adev);
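+			/* pack each SE/SH bitmap into active_rbs, rb_bitmap_width_per_sh bits per SH */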
+			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+					       rb_bitmap_width_per_sh);
 		}
 	}
 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
-	mask = 1;
-	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
-		if (!(disabled_rbs & mask))
-			enabled_rbs |= mask;
-		mask <<= 1;
-	}
-
-	adev->gfx.config.backend_enable_mask = enabled_rbs;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
-		data = 0;
-		for (j = 0; j < sh_per_se; j++) {
-			switch (enabled_rbs & 3) {
-			case 0:
-				if (j == 0)
-					data |= (RASTER_CONFIG_RB_MAP_3 <<
-						PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
-				else
-					data |= (RASTER_CONFIG_RB_MAP_0 <<
-						PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
-				break;
-			case 1:
-				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
-				break;
-			case 2:
-				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
-				break;
-			case 3:
-			default:
-				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
-				break;
-			}
-			enabled_rbs >>= 2;
-		}
-		WREG32(mmPA_SC_RASTER_CONFIG, data);
-	}
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
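+	/* cache the result; hweight32() counts the bits set in the bitmap */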
+	adev->gfx.config.backend_enable_mask = active_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 }
 
 /**
@@ -2059,192 +1706,23 @@
  */
 static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 {
-	u32 gb_addr_config;
-	u32 mc_shared_chmap, mc_arb_ramcfg;
-	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
-	u32 sh_mem_cfg;
-	u32 tmp;
+	u32 tmp, sh_mem_cfg;
 	int i;
 
-	switch (adev->asic_type) {
-	case CHIP_BONAIRE:
-		adev->gfx.config.max_shader_engines = 2;
-		adev->gfx.config.max_tile_pipes = 4;
-		adev->gfx.config.max_cu_per_sh = 7;
-		adev->gfx.config.max_sh_per_se = 1;
-		adev->gfx.config.max_backends_per_se = 2;
-		adev->gfx.config.max_texture_channel_caches = 4;
-		adev->gfx.config.max_gprs = 256;
-		adev->gfx.config.max_gs_threads = 32;
-		adev->gfx.config.max_hw_contexts = 8;
-
-		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
-		break;
-	case CHIP_HAWAII:
-		adev->gfx.config.max_shader_engines = 4;
-		adev->gfx.config.max_tile_pipes = 16;
-		adev->gfx.config.max_cu_per_sh = 11;
-		adev->gfx.config.max_sh_per_se = 1;
-		adev->gfx.config.max_backends_per_se = 4;
-		adev->gfx.config.max_texture_channel_caches = 16;
-		adev->gfx.config.max_gprs = 256;
-		adev->gfx.config.max_gs_threads = 32;
-		adev->gfx.config.max_hw_contexts = 8;
-
-		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
-		break;
-	case CHIP_KAVERI:
-		adev->gfx.config.max_shader_engines = 1;
-		adev->gfx.config.max_tile_pipes = 4;
-		if ((adev->pdev->device == 0x1304) ||
-		    (adev->pdev->device == 0x1305) ||
-		    (adev->pdev->device == 0x130C) ||
-		    (adev->pdev->device == 0x130F) ||
-		    (adev->pdev->device == 0x1310) ||
-		    (adev->pdev->device == 0x1311) ||
-		    (adev->pdev->device == 0x131C)) {
-			adev->gfx.config.max_cu_per_sh = 8;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1309) ||
-			   (adev->pdev->device == 0x130A) ||
-			   (adev->pdev->device == 0x130D) ||
-			   (adev->pdev->device == 0x1313) ||
-			   (adev->pdev->device == 0x131D)) {
-			adev->gfx.config.max_cu_per_sh = 6;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1306) ||
-			   (adev->pdev->device == 0x1307) ||
-			   (adev->pdev->device == 0x130B) ||
-			   (adev->pdev->device == 0x130E) ||
-			   (adev->pdev->device == 0x1315) ||
-			   (adev->pdev->device == 0x131B)) {
-			adev->gfx.config.max_cu_per_sh = 4;
-			adev->gfx.config.max_backends_per_se = 1;
-		} else {
-			adev->gfx.config.max_cu_per_sh = 3;
-			adev->gfx.config.max_backends_per_se = 1;
-		}
-		adev->gfx.config.max_sh_per_se = 1;
-		adev->gfx.config.max_texture_channel_caches = 4;
-		adev->gfx.config.max_gprs = 256;
-		adev->gfx.config.max_gs_threads = 16;
-		adev->gfx.config.max_hw_contexts = 8;
-
-		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
-		break;
-	case CHIP_KABINI:
-	case CHIP_MULLINS:
-	default:
-		adev->gfx.config.max_shader_engines = 1;
-		adev->gfx.config.max_tile_pipes = 2;
-		adev->gfx.config.max_cu_per_sh = 2;
-		adev->gfx.config.max_sh_per_se = 1;
-		adev->gfx.config.max_backends_per_se = 1;
-		adev->gfx.config.max_texture_channel_caches = 2;
-		adev->gfx.config.max_gprs = 256;
-		adev->gfx.config.max_gs_threads = 16;
-		adev->gfx.config.max_hw_contexts = 8;
-
-		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
-		break;
-	}
-
 	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
 
-	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
-	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
-	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
-
-	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
-	adev->gfx.config.mem_max_burst_length_bytes = 256;
-	if (adev->flags & AMD_IS_APU) {
-		/* Get memory bank mapping mode. */
-		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
-		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
-		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
-
-		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
-		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
-		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
-
-		/* Validate settings in case only one DIMM installed. */
-		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
-			dimm00_addr_map = 0;
-		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
-			dimm01_addr_map = 0;
-		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
-			dimm10_addr_map = 0;
-		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
-			dimm11_addr_map = 0;
-
-		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
-		/* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
-		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
-			adev->gfx.config.mem_row_size_in_kb = 2;
-		else
-			adev->gfx.config.mem_row_size_in_kb = 1;
-	} else {
-		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
-		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
-		if (adev->gfx.config.mem_row_size_in_kb > 4)
-			adev->gfx.config.mem_row_size_in_kb = 4;
-	}
-	/* XXX use MC settings? */
-	adev->gfx.config.shader_engine_tile_size = 32;
-	adev->gfx.config.num_gpus = 1;
-	adev->gfx.config.multi_gpu_tile_size = 64;
-
-	/* fix up row size */
-	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
-	switch (adev->gfx.config.mem_row_size_in_kb) {
-	case 1:
-	default:
-		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
-		break;
-	case 2:
-		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
-		break;
-	case 4:
-		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
-		break;
-	}
-	adev->gfx.config.gb_addr_config = gb_addr_config;
-
-	WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
-	WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
-	WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
-	WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
-	WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
-	WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
-	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
-	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
 
 	gfx_v7_0_tiling_mode_table_init(adev);
 
-	gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
-			  adev->gfx.config.max_sh_per_se,
-			  adev->gfx.config.max_backends_per_se);
+	gfx_v7_0_setup_rb(adev);
 
 	/* set HW defaults for 3D engine */
 	WREG32(mmCP_MEQ_THRESHOLDS,
-			(0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
-			(0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
+	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	/*
@@ -2255,7 +1733,7 @@
 
 	/* XXX SH_MEM regs */
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
-	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 
+	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
 				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
 
 	mutex_lock(&adev->srbm_mutex);
@@ -2379,7 +1857,7 @@
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = amdgpu_ring_lock(ring, 3);
+	r = amdgpu_ring_alloc(ring, 3);
 	if (r) {
 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
 		amdgpu_gfx_scratch_free(adev, scratch);
@@ -2388,7 +1866,7 @@
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -2447,6 +1925,25 @@
 }
 
 /**
+ * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
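+	/* a CP write of 1 to mmHDP_DEBUG0 invalidates the HDP cache */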
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0) |
+				 WR_CONFIRM));
+	amdgpu_ring_write(ring, mmHDP_DEBUG0);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 1);
+}
+
+/**
  * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
  *
  * @adev: amdgpu_device pointer
@@ -2516,36 +2013,6 @@
 	amdgpu_ring_write(ring, upper_32_bits(seq));
 }
 
-/**
- * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring
- *
- * @ring: amdgpu ring buffer object
- * @semaphore: amdgpu semaphore object
- * @emit_wait: Is this a sempahore wait?
- *
- * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
- * from running ahead of semaphore waits.
- */
-static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
-
-	amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
-	amdgpu_ring_write(ring, addr & 0xffffffff);
-	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
-
-	if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
-		/* Prevent the PFP from running ahead of the semaphore wait */
-		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
-		amdgpu_ring_write(ring, 0x0);
-	}
-
-	return true;
-}
-
 /*
  * IB stuff
  */
@@ -2593,8 +2060,7 @@
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
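+	/* the VM ID is carried in the high bits (24 and up) of the IB control word */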
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -2622,8 +2088,7 @@
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-			   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -2661,7 +2126,7 @@
 	}
 	WREG32(scratch, 0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(ring, NULL, 256, &ib);
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		goto err1;
@@ -2671,9 +2136,7 @@
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err2;
 
@@ -2700,7 +2163,8 @@
 
 err2:
-	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
@@ -2842,7 +2306,7 @@
 
 	gfx_v7_0_cp_gfx_enable(adev, true);
 
-	r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8);
+	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
 	if (r) {
 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
 		return r;
@@ -2911,7 +2375,7 @@
 	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 	return 0;
 }
@@ -2989,21 +2453,14 @@
 
 static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
 {
-	u32 rptr;
-
-	rptr = ring->adev->wb.wb[ring->rptr_offs];
-
-	return rptr;
+	return ring->adev->wb.wb[ring->rptr_offs];
 }
 
 static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 wptr;
 
-	wptr = RREG32(mmCP_RB0_WPTR);
-
-	return wptr;
+	return RREG32(mmCP_RB0_WPTR);
 }
 
 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
@@ -3016,21 +2473,13 @@
 
 static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
 {
-	u32 rptr;
-
-	rptr = ring->adev->wb.wb[ring->rptr_offs];
-
-	return rptr;
+	return ring->adev->wb.wb[ring->rptr_offs];
 }
 
 static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
-	u32 wptr;
-
 	/* XXX check if swapping is necessary on BE */
-	wptr = ring->adev->wb.wb[ring->wptr_offs];
-
-	return wptr;
+	return ring->adev->wb.wb[ring->wptr_offs];
 }
 
 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
@@ -3126,21 +2575,6 @@
 }
 
 /**
- * gfx_v7_0_cp_compute_start - start the compute queues
- *
- * @adev: amdgpu_device pointer
- *
- * Enable the compute queues.
- * Returns 0 for success, error for failure.
- */
-static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev)
-{
-	gfx_v7_0_cp_compute_enable(adev, true);
-
-	return 0;
-}
-
-/**
  * gfx_v7_0_cp_compute_fini - stop the compute queues
  *
  * @adev: amdgpu_device pointer
@@ -3330,9 +2764,7 @@
 	u32 *buf;
 	struct bonaire_mqd *mqd;
 
-	r = gfx_v7_0_cp_compute_start(adev);
-	if (r)
-		return r;
+	gfx_v7_0_cp_compute_enable(adev, true);
 
 	/* fix up chicken bits */
 	tmp = RREG32(mmCP_CPF_DEBUG);
@@ -3610,6 +3042,26 @@
 	return 0;
 }
 
+/**
+ * gfx_v7_0_ring_emit_pipeline_sync - cik pipeline sync using the CP
+ *
+ * @ring: the ring to emit the commands to
+ *
+ * Sync the command pipeline with the PFP, i.e. wait for everything
+ * to be completed.
+ */
+static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+	if (usepfp) {
+		/* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
 /*
  * vm
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3641,14 +3093,6 @@
 	amdgpu_ring_write(ring, 0xffffffff);
 	amdgpu_ring_write(ring, 4); /* poll interval */
 
-	if (usepfp) {
-		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-		amdgpu_ring_write(ring, 0);
-	}
-
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
 				 WRITE_DATA_DST_SEL(0)));
@@ -4408,28 +3852,19 @@
 	}
 }
 
-static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev,
-					 u32 se, u32 sh)
+static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 {
-	u32 mask = 0, tmp, tmp1;
-	int i;
+	u32 data, mask;
 
-	gfx_v7_0_select_se_sh(adev, se, sh);
-	tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
 
-	tmp &= 0xffff0000;
+	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
 
-	tmp |= tmp1;
-	tmp >>= 16;
+	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
-	for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
-		mask <<= 1;
-		mask |= 1;
-	}
-
-	return (~tmp) & mask;
+	return (~data) & mask;
 }
 
 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
@@ -4767,6 +4202,172 @@
 	return 0;
 }
 
+static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
+{
+	u32 gb_addr_config;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
+	u32 tmp;
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		adev->gfx.config.max_shader_engines = 2;
+		adev->gfx.config.max_tile_pipes = 4;
+		adev->gfx.config.max_cu_per_sh = 7;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 2;
+		adev->gfx.config.max_texture_channel_caches = 4;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_HAWAII:
+		adev->gfx.config.max_shader_engines = 4;
+		adev->gfx.config.max_tile_pipes = 16;
+		adev->gfx.config.max_cu_per_sh = 11;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 16;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_KAVERI:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 4;
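+		/* Kaveri SKUs differ in CU and RB counts; tell them apart by PCI device ID */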
+		if ((adev->pdev->device == 0x1304) ||
+		    (adev->pdev->device == 0x1305) ||
+		    (adev->pdev->device == 0x130C) ||
+		    (adev->pdev->device == 0x130F) ||
+		    (adev->pdev->device == 0x1310) ||
+		    (adev->pdev->device == 0x1311) ||
+		    (adev->pdev->device == 0x131C)) {
+			adev->gfx.config.max_cu_per_sh = 8;
+			adev->gfx.config.max_backends_per_se = 2;
+		} else if ((adev->pdev->device == 0x1309) ||
+			   (adev->pdev->device == 0x130A) ||
+			   (adev->pdev->device == 0x130D) ||
+			   (adev->pdev->device == 0x1313) ||
+			   (adev->pdev->device == 0x131D)) {
+			adev->gfx.config.max_cu_per_sh = 6;
+			adev->gfx.config.max_backends_per_se = 2;
+		} else if ((adev->pdev->device == 0x1306) ||
+			   (adev->pdev->device == 0x1307) ||
+			   (adev->pdev->device == 0x130B) ||
+			   (adev->pdev->device == 0x130E) ||
+			   (adev->pdev->device == 0x1315) ||
+			   (adev->pdev->device == 0x131B)) {
+			adev->gfx.config.max_cu_per_sh = 4;
+			adev->gfx.config.max_backends_per_se = 1;
+		} else {
+			adev->gfx.config.max_cu_per_sh = 3;
+			adev->gfx.config.max_backends_per_se = 1;
+		}
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_texture_channel_caches = 4;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 16;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+	default:
+		adev->gfx.config.max_shader_engines = 1;
+		adev->gfx.config.max_tile_pipes = 2;
+		adev->gfx.config.max_cu_per_sh = 2;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 1;
+		adev->gfx.config.max_texture_channel_caches = 2;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 16;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+
+	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+	adev->gfx.config.mem_max_burst_length_bytes = 256;
+	if (adev->flags & AMD_IS_APU) {
+		/* Get memory bank mapping mode. */
+		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
+		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
+		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+		/* Validate settings in case only one DIMM installed. */
+		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
+			dimm00_addr_map = 0;
+		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
+			dimm01_addr_map = 0;
+		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
+			dimm10_addr_map = 0;
+		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
+			dimm11_addr_map = 0;
+
+		/* If the DIMM addr map is 8GB, the ROW size should be 2KB; otherwise 1KB. */
+		/* If ROW size(DIMM1) != ROW size(DIMM0), the ROW size should be the larger one. */
+		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
+			adev->gfx.config.mem_row_size_in_kb = 2;
+		else
+			adev->gfx.config.mem_row_size_in_kb = 1;
+	} else {
+		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
+		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+		if (adev->gfx.config.mem_row_size_in_kb > 4)
+			adev->gfx.config.mem_row_size_in_kb = 4;
+	}
+	/* XXX use MC settings? */
+	adev->gfx.config.shader_engine_tile_size = 32;
+	adev->gfx.config.num_gpus = 1;
+	adev->gfx.config.multi_gpu_tile_size = 64;
+
+	/* fix up row size */
+	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
+	switch (adev->gfx.config.mem_row_size_in_kb) {
+	case 1:
+	default:
+		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+		break;
+	case 2:
+		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+		break;
+	case 4:
+		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+		break;
+	}
+	adev->gfx.config.gb_addr_config = gb_addr_config;
+}
+
 static int gfx_v7_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
@@ -4870,6 +4471,10 @@
 	if (r)
 		return r;
 
+	adev->gfx.ce_ram_size = 0x8000;
+
+	gfx_v7_0_gpu_early_init(adev);
+
 	return r;
 }
 
@@ -4910,8 +4515,6 @@
 	if (r)
 		return r;
 
-	adev->gfx.ce_ram_size = 0x8000;
-
 	return r;
 }
 
@@ -5028,16 +4631,6 @@
 		 RREG32(mmHDP_ADDR_CONFIG));
 	dev_info(adev->dev, "  DMIF_ADDR_CALC=0x%08X\n",
 		 RREG32(mmDMIF_ADDR_CALC));
-	dev_info(adev->dev, "  SDMA0_TILING_CONFIG=0x%08X\n",
-		 RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET));
-	dev_info(adev->dev, "  SDMA1_TILING_CONFIG=0x%08X\n",
-		 RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET));
-	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
 
 	dev_info(adev->dev, "  CP_MEQ_THRESHOLDS=0x%08X\n",
 		 RREG32(mmCP_MEQ_THRESHOLDS));
@@ -5580,13 +5173,15 @@
 	.parse_cs = NULL,
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
-	.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
+	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5596,13 +5191,15 @@
 	.parse_cs = NULL,
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
-	.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
+	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5672,7 +5269,7 @@
 
 
 int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
-								   struct amdgpu_cu_info *cu_info)
+			 struct amdgpu_cu_info *cu_info)
 {
 	int i, j, k, counter, active_cu_number = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
@@ -5680,16 +5277,19 @@
 	if (!adev || !cu_info)
 		return -EINVAL;
 
+	memset(cu_info, 0, sizeof(*cu_info));
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			mask = 1;
 			ao_bitmap = 0;
 			counter = 0;
-			bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j);
+			gfx_v7_0_select_se_sh(adev, i, j);
+			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
 			cu_info->bitmap[i][j] = bitmap;
 
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+			for (k = 0; k < 16; k ++) {
 				if (bitmap & mask) {
 					if (counter < 2)
 						ao_bitmap |= mask;
@@ -5701,9 +5301,11 @@
 			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
 		}
 	}
+	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
-	mutex_unlock(&adev->grbm_idx_mutex);
+
 	return 0;
 }
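
The CU-bitmap rework above (mirrored for gfx_v8_0 below) replaces the
open-coded shift loop with a decode of the INACTIVE_CUS register field:
OR the fused and user-configured harvest registers, extract the
inactive-CU field, invert it, and clip to max_cu_per_sh bits. A
standalone sketch of the computation (names and field parameters here
are illustrative, not the driver's):

	#include <stdint.h>

	static uint32_t create_bitmask(uint32_t bit_width)
	{
		return (uint32_t)((1ULL << bit_width) - 1);
	}

	static uint32_t cu_active_bitmap(uint32_t cc_cfg, uint32_t user_cfg,
					 uint32_t inactive_mask,
					 uint32_t inactive_shift,
					 uint32_t max_cu_per_sh)
	{
		/* CUs fused off or disabled by the user */
		uint32_t inactive = ((cc_cfg | user_cfg) & inactive_mask)
				    >> inactive_shift;

		/* active = not inactive, limited to the CUs that exist */
		return ~inactive & create_bitmask(max_cu_per_sh);
	}

Note that gfx_v7_0_select_se_sh() moved out of the helper into the
caller, which now holds grbm_idx_mutex across the whole SE/SH loop and
restores broadcast mode once at the end.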
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 7086ac1..f0c7b35 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -43,9 +43,6 @@
 #include "gca/gfx_8_0_sh_mask.h"
 #include "gca/gfx_8_0_enum.h"
 
-#include "uvd/uvd_5_0_d.h"
-#include "uvd/uvd_5_0_sh_mask.h"
-
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 
@@ -652,7 +649,7 @@
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = amdgpu_ring_lock(ring, 3);
+	r = amdgpu_ring_alloc(ring, 3);
 	if (r) {
 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
 			  ring->idx, r);
@@ -662,7 +659,7 @@
 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -699,7 +696,7 @@
 	}
 	WREG32(scratch, 0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(ring, NULL, 256, &ib);
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		goto err1;
@@ -709,9 +706,7 @@
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err2;
 
@@ -737,7 +732,8 @@
 	}
 err2:
 	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err1:
 	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
@@ -1171,7 +1167,7 @@
 
 	/* allocate an indirect buffer to put the commands in */
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(ring, NULL, total_size, &ib);
+	r = amdgpu_ib_get(adev, NULL, total_size, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		return r;
@@ -1266,9 +1262,7 @@
 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
 	/* schedule the ib on the ring */
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r) {
 		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
 		goto fail;
@@ -1296,7 +1290,8 @@
 
 fail:
 	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 
 	return r;
 }
@@ -2574,11 +2569,6 @@
 	}
 }
 
-static u32 gfx_v8_0_create_bitmask(u32 bit_width)
-{
-	return (u32)((1ULL << bit_width) - 1);
-}
-
 void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
 {
 	u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
@@ -2599,89 +2589,49 @@
 	WREG32(mmGRBM_GFX_INDEX, data);
 }
 
-static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
-				    u32 max_rb_num_per_se,
-				    u32 sh_per_se)
+static u32 gfx_v8_0_create_bitmask(u32 bit_width)
+{
+	return (u32)((1ULL << bit_width) - 1);
+}
+
+static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 {
 	u32 data, mask;
 
 	data = RREG32(mmCC_RB_BACKEND_DISABLE);
-	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-
 	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
+	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
 
-	mask = gfx_v8_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
+				       adev->gfx.config.max_sh_per_se);
 
-	return data & mask;
+	return (~data) & mask;
 }
 
-static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
-			      u32 se_num, u32 sh_per_se,
-			      u32 max_rb_num_per_se)
+static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
-	u32 data, mask;
-	u32 disabled_rbs = 0;
-	u32 enabled_rbs = 0;
+	u32 data;
+	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		for (j = 0; j < sh_per_se; j++) {
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v8_0_select_se_sh(adev, i, j);
-			data = gfx_v8_0_get_rb_disabled(adev,
-					      max_rb_num_per_se, sh_per_se);
-			disabled_rbs |= data << ((i * sh_per_se + j) *
-						 RB_BITMAP_WIDTH_PER_SH);
+			data = gfx_v8_0_get_rb_active_bitmap(adev);
+			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+					       rb_bitmap_width_per_sh);
 		}
 	}
 	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
-	mask = 1;
-	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
-		if (!(disabled_rbs & mask))
-			enabled_rbs |= mask;
-		mask <<= 1;
-	}
-
-	adev->gfx.config.backend_enable_mask = enabled_rbs;
-
-	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < se_num; i++) {
-		gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
-		data = RREG32(mmPA_SC_RASTER_CONFIG);
-		for (j = 0; j < sh_per_se; j++) {
-			switch (enabled_rbs & 3) {
-			case 0:
-				if (j == 0)
-					data |= (RASTER_CONFIG_RB_MAP_3 <<
-						 PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
-				else
-					data |= (RASTER_CONFIG_RB_MAP_0 <<
-						 PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
-				break;
-			case 1:
-				data |= (RASTER_CONFIG_RB_MAP_0 <<
-					 (i * sh_per_se + j) * 2);
-				break;
-			case 2:
-				data |= (RASTER_CONFIG_RB_MAP_3 <<
-					 (i * sh_per_se + j) * 2);
-				break;
-			case 3:
-			default:
-				data |= (RASTER_CONFIG_RB_MAP_2 <<
-					 (i * sh_per_se + j) * 2);
-				break;
-			}
-			enabled_rbs >>= 2;
-		}
-		WREG32(mmPA_SC_RASTER_CONFIG, data);
-	}
-	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
+	adev->gfx.config.backend_enable_mask = active_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 }
 
 /**
@@ -2741,19 +2691,10 @@
 	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
-	WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET,
-	       adev->gfx.config.gb_addr_config & 0x70);
-	WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET,
-	       adev->gfx.config.gb_addr_config & 0x70);
-	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
-	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
-	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 
 	gfx_v8_0_tiling_mode_table_init(adev);
 
-	gfx_v8_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
-				 adev->gfx.config.max_sh_per_se,
-				 adev->gfx.config.max_backends_per_se);
+	gfx_v8_0_setup_rb(adev);
 
 	/* XXX SH_MEM regs */
 	/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -3062,7 +3003,7 @@
 
 	gfx_v8_0_cp_gfx_enable(adev, true);
 
-	r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4);
+	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
 	if (r) {
 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
 		return r;
@@ -3126,7 +3067,7 @@
 	amdgpu_ring_write(ring, 0x8000);
 	amdgpu_ring_write(ring, 0x8000);
 
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 	return 0;
 }
@@ -3226,13 +3167,6 @@
 	udelay(50);
 }
 
-static int gfx_v8_0_cp_compute_start(struct amdgpu_device *adev)
-{
-	gfx_v8_0_cp_compute_enable(adev, true);
-
-	return 0;
-}
-
 static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 {
 	const struct gfx_firmware_header_v1_0 *mec_hdr;
@@ -3802,9 +3736,7 @@
 		WREG32(mmCP_PQ_STATUS, tmp);
 	}
 
-	r = gfx_v8_0_cp_compute_start(adev);
-	if (r)
-		return r;
+	gfx_v8_0_cp_compute_enable(adev, true);
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
@@ -4016,16 +3948,6 @@
 		 RREG32(mmHDP_ADDR_CONFIG));
 	dev_info(adev->dev, "  DMIF_ADDR_CALC=0x%08X\n",
 		 RREG32(mmDMIF_ADDR_CALC));
-	dev_info(adev->dev, "  SDMA0_TILING_CONFIG=0x%08X\n",
-		 RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET));
-	dev_info(adev->dev, "  SDMA1_TILING_CONFIG=0x%08X\n",
-		 RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET));
-	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
-	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
-		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
 
 	dev_info(adev->dev, "  CP_MEQ_THRESHOLDS=0x%08X\n",
 		 RREG32(mmCP_MEQ_THRESHOLDS));
@@ -4667,6 +4589,18 @@
 	amdgpu_ring_write(ring, 0x20); /* poll interval */
 }
 
+static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0) |
+				 WR_CONFIRM));
+	amdgpu_ring_write(ring, mmHDP_DEBUG0);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 1);
+}
+
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib)
 {
@@ -4699,8 +4633,7 @@
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -4729,8 +4662,7 @@
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-			   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
@@ -4762,49 +4694,10 @@
 
 }
 
-/**
- * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring
- *
- * @ring: amdgpu ring buffer object
- * @semaphore: amdgpu semaphore object
- * @emit_wait: Is this a sempahore wait?
- *
- * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
- * from running ahead of semaphore waits.
- */
-static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
-
-	if (ring->adev->asic_type == CHIP_TOPAZ ||
-	    ring->adev->asic_type == CHIP_TONGA ||
-	    ring->adev->asic_type == CHIP_FIJI)
-		/* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */
-		return false;
-	else {
-		amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2));
-		amdgpu_ring_write(ring, lower_32_bits(addr));
-		amdgpu_ring_write(ring, upper_32_bits(addr));
-		amdgpu_ring_write(ring, sel);
-	}
-
-	if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
-		/* Prevent the PFP from running ahead of the semaphore wait */
-		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
-		amdgpu_ring_write(ring, 0x0);
-	}
-
-	return true;
-}
-
-static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-					unsigned vm_id, uint64_t pd_addr)
+static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+	uint32_t seq = ring->fence_drv.sync_seq;
 	uint64_t addr = ring->fence_drv.gpu_addr;
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -4824,6 +4717,12 @@
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 	}
+}
+
+static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+					unsigned vm_id, uint64_t pd_addr)
+{
+	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -5146,13 +5045,15 @@
 	.parse_cs = NULL,
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
-	.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
+	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -5162,13 +5063,15 @@
 	.parse_cs = NULL,
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
-	.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
+	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5237,32 +5140,23 @@
 	}
 }
 
-static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev,
-		u32 se, u32 sh)
+static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 {
-	u32 mask = 0, tmp, tmp1;
-	int i;
+	u32 data, mask;
 
-	gfx_v8_0_select_se_sh(adev, se, sh);
-	tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
-	tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
 
-	tmp &= 0xffff0000;
+	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
 
-	tmp |= tmp1;
-	tmp >>= 16;
+	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
-	for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
-		mask <<= 1;
-		mask |= 1;
-	}
-
-	return (~tmp) & mask;
+	return (~data) & mask;
 }
 
 int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
-						 struct amdgpu_cu_info *cu_info)
+			 struct amdgpu_cu_info *cu_info)
 {
 	int i, j, k, counter, active_cu_number = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
@@ -5270,16 +5164,19 @@
 	if (!adev || !cu_info)
 		return -EINVAL;
 
+	memset(cu_info, 0, sizeof(*cu_info));
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			mask = 1;
 			ao_bitmap = 0;
 			counter = 0;
-			bitmap = gfx_v8_0_get_cu_active_bitmap(adev, i, j);
+			gfx_v8_0_select_se_sh(adev, i, j);
+			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
 			cu_info->bitmap[i][j] = bitmap;
 
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+			for (k = 0; k < 16; k ++) {
 				if (bitmap & mask) {
 					if (counter < 2)
 						ao_bitmap |= mask;
@@ -5291,9 +5188,11 @@
 			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
 		}
 	}
+	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	cu_info->number = active_cu_number;
 	cu_info->ao_cu_mask = ao_cu_mask;
-	mutex_unlock(&adev->grbm_idx_mutex);
+
 	return 0;
 }
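
The setup_rb() rewrite above drops the disabled-RB bookkeeping and the
PA_SC_RASTER_CONFIG programming in favour of a single active-RB bitmap
whose population count gives num_rbs. The accumulation, as a
self-contained sketch (loop bounds and the callback stand in for the
driver's config fields and gfx_v8_0_get_rb_active_bitmap()):

	#include <stdint.h>

	static uint32_t gather_active_rbs(unsigned num_se, unsigned sh_per_se,
					  unsigned backends_per_se,
					  uint32_t (*rb_bitmap)(unsigned se,
								unsigned sh))
	{
		unsigned width = backends_per_se / sh_per_se;
		uint32_t active_rbs = 0;
		unsigned se, sh;

		for (se = 0; se < num_se; se++)
			for (sh = 0; sh < sh_per_se; sh++)
				active_rbs |= rb_bitmap(se, sh) <<
					((se * sh_per_se + sh) * width);

		return active_rbs;	/* num_rbs = hweight32(active_rbs) */
	}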
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index b806079..82ce7d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -339,7 +339,7 @@
 	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 
 	tmp = RREG32(mmHDP_MISC_CNTL);
-	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
 	WREG32(mmHDP_MISC_CNTL, tmp);
 
 	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
@@ -694,7 +694,8 @@
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	amdgpu_vm_manager_init(adev);
 
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU) {
@@ -926,10 +927,6 @@
 	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_gem_init(adev);
-	if (r)
-		return r;
-
 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
@@ -1010,7 +1007,7 @@
 		adev->vm_manager.enabled = false;
 	}
 	gmc_v7_0_gart_fini(adev);
-	amdgpu_gem_fini(adev);
+	amdgpu_gem_force_release(adev);
 	amdgpu_bo_fini(adev);
 
 	return 0;
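
In both GMC blocks the init path now sets vm_manager.num_ids and calls
amdgpu_vm_manager_init() directly, so VMID bookkeeping is set up by the
memory-controller code rather than lazily. The layout the in-code
comment describes, as a reference sketch (the define is illustrative;
AMDGPU_NUM_OF_VMIDS is the driver's actual constant):

	/*
	 * VMID 0      - kernel context / GART mappings
	 * VMID 1..7   - amdgpu graphics and compute
	 * VMID 8..15  - amdkfd (HSA) processes
	 */
	#define NUM_OF_VMIDS	8	/* IDs handed to the VM manager */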
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 3efd455..29bd7b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -252,6 +252,12 @@
 	if (!adev->mc.fw)
 		return -EINVAL;
 
+	/* Skip MC ucode loading on SR-IOV capable boards.
+	 * vbios does this for us in asic_init in that case.
+	 */
+	if (adev->virtualization.supports_sr_iov)
+		return 0;
+
 	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
 	amdgpu_ucode_print_mc_hdr(&hdr->header);
 
@@ -380,7 +386,7 @@
 	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 
 	tmp = RREG32(mmHDP_MISC_CNTL);
-	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
 	WREG32(mmHDP_MISC_CNTL, tmp);
 
 	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
@@ -774,7 +780,8 @@
 	 * amdgpu graphics/compute will use VMIDs 1-7
 	 * amdkfd will use VMIDs 8-15
 	 */
-	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+	amdgpu_vm_manager_init(adev);
 
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU) {
@@ -880,10 +887,6 @@
 	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_gem_init(adev);
-	if (r)
-		return r;
-
 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
@@ -964,7 +967,7 @@
 		adev->vm_manager.enabled = false;
 	}
 	gmc_v8_0_gart_fini(adev);
-	amdgpu_gem_fini(adev);
+	amdgpu_gem_force_release(adev);
 	amdgpu_bo_fini(adev);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 090486c..52ee081 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -279,6 +279,12 @@
 	if (!adev->pm.fw)
 		return -EINVAL;
 
+	/* Skip SMC ucode loading on SR-IOV capable boards.
+	 * vbios does this for us in asic_init in that case.
+	 */
+	if (adev->virtualization.supports_sr_iov)
+		return 0;
+
 	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
 	amdgpu_ucode_print_smc_hdr(&hdr->header);
 
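
The same SR-IOV early-out appears three times in this series (gmc_v8_0
MC ucode, iceland and tonga SMC ucode): on SR-IOV capable boards the
vbios has already uploaded the firmware during asic_init, so the
driver-side load degenerates to a no-op. The shared shape of the check,
sketched with kernel types assumed from context:

	static int xxx_load_ucode(struct amdgpu_device *adev)
	{
		if (!adev->pm.fw)
			return -EINVAL;

		/* vbios already did the upload in asic_init */
		if (adev->virtualization.supports_sr_iov)
			return 0;

		/* ... parse the firmware header and upload ... */
		return 0;
	}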
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2cf5018..6e0a86a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -32,8 +32,8 @@
 #include "oss/oss_2_4_d.h"
 #include "oss/oss_2_4_sh_mask.h"
 
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
 
 #include "gca/gfx_8_0_d.h"
 #include "gca/gfx_8_0_enum.h"
@@ -244,7 +244,7 @@
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib)
 {
-	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 vmid = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
@@ -300,6 +300,13 @@
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+	amdgpu_ring_write(ring, mmHDP_DEBUG0);
+	amdgpu_ring_write(ring, 1);
+}
+
 /**
  * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -335,31 +342,6 @@
 }
 
 /**
- * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (VI).
- */
-static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
-					  struct amdgpu_semaphore *semaphore,
-					  bool emit_wait)
-{
-	u64 addr = semaphore->gpu_addr;
-	u32 sig = emit_wait ? 0 : 1;
-
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
-			  SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
-	amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
-	amdgpu_ring_write(ring, upper_32_bits(addr));
-
-	return true;
-}
-
-/**
  * sdma_v2_4_gfx_stop - stop the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
@@ -459,6 +441,9 @@
 		vi_srbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 
+		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
+		       adev->gfx.config.gb_addr_config & 0x70);
+
 		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 
 		/* Set ring buffer size in dwords */
@@ -636,7 +621,7 @@
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
-	r = amdgpu_ring_lock(ring, 5);
+	r = amdgpu_ring_alloc(ring, 5);
 	if (r) {
 		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 		amdgpu_wb_free(adev, index);
@@ -649,7 +634,7 @@
 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -699,7 +684,7 @@
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(ring, NULL, 256, &ib);
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		goto err0;
@@ -716,9 +701,7 @@
 	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err1;
 
@@ -744,7 +727,8 @@
 
 err1:
 	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;
@@ -797,7 +781,7 @@
  * Update PTEs by writing them manually using sDMA (CIK).
  */
 static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
-				   uint64_t pe,
+				   const dma_addr_t *pages_addr, uint64_t pe,
 				   uint64_t addr, unsigned count,
 				   uint32_t incr, uint32_t flags)
 {
@@ -816,14 +800,7 @@
 		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 		ib->ptr[ib->length_dw++] = ndw;
 		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			if (flags & AMDGPU_PTE_SYSTEM) {
-				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
-			} else if (flags & AMDGPU_PTE_VALID) {
-				value = addr;
-			} else {
-				value = 0;
-			}
+			value = amdgpu_vm_map_gart(pages_addr, addr);
 			addr += incr;
 			value |= flags;
 			ib->ptr[ib->length_dw++] = value;
@@ -881,14 +858,14 @@
 }
 
 /**
- * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw
+ * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
  *
  * @ib: indirect buffer to fill with padding
  *
  */
-static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
+static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	u32 pad_count;
 	int i;
 
@@ -904,6 +881,31 @@
 }
 
 /**
+ * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (CIK).
+ */
+static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	/* wait for idle */
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq); /* reference */
+	amdgpu_ring_write(ring, 0xfffffff); /* mask */
+	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+}
+
+/**
  * sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA
  *
  * @ring: amdgpu_ring pointer
@@ -1111,6 +1113,8 @@
 			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
 		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
 			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
+			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
 		mutex_lock(&adev->srbm_mutex);
 		for (j = 0; j < 16; j++) {
 			vi_srbm_select(adev, 0, 0, 0, j);
@@ -1302,12 +1306,14 @@
 	.parse_cs = NULL,
 	.emit_ib = sdma_v2_4_ring_emit_ib,
 	.emit_fence = sdma_v2_4_ring_emit_fence,
-	.emit_semaphore = sdma_v2_4_ring_emit_semaphore,
+	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
 	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
 	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
 	.test_ring = sdma_v2_4_ring_test_ring,
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.insert_nop = sdma_v2_4_ring_insert_nop,
+	.pad_ib = sdma_v2_4_ring_pad_ib,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1405,14 +1411,18 @@
 	.copy_pte = sdma_v2_4_vm_copy_pte,
 	.write_pte = sdma_v2_4_vm_write_pte,
 	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
-	.pad_ib = sdma_v2_4_vm_pad_ib,
 };
 
 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
+	unsigned i;
+
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
-		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
-		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			adev->vm_manager.vm_pte_rings[i] =
+				&adev->sdma.instance[i].ring;
+
+		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
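
Registering every SDMA instance in vm_manager.vm_pte_rings[] (instead
of tagging instance 0 as the one PTE ring) lets page-table updates be
spread across engines. A hypothetical selection helper, not part of
this patch, could be as simple as round-robin:

	/* pick the next PTE-capable ring; 'counter' is caller state */
	static struct amdgpu_ring *pick_pte_ring(struct amdgpu_vm_manager *mgr,
						 unsigned *counter)
	{
		return mgr->vm_pte_rings[(*counter)++ % mgr->vm_pte_num_rings];
	}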
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index ad54c46..8c8ca98 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -355,7 +355,7 @@
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib)
 {
-	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 vmid = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
@@ -410,6 +410,14 @@
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+	amdgpu_ring_write(ring, mmHDP_DEBUG0);
+	amdgpu_ring_write(ring, 1);
+}
+
 /**
  * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -444,32 +452,6 @@
 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 }
 
-
-/**
- * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (VI).
- */
-static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					  struct amdgpu_semaphore *semaphore,
-					  bool emit_wait)
-{
-	u64 addr = semaphore->gpu_addr;
-	u32 sig = emit_wait ? 0 : 1;
-
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
-			  SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
-	amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
-	amdgpu_ring_write(ring, upper_32_bits(addr));
-
-	return true;
-}
-
 /**
  * sdma_v3_0_gfx_stop - stop the gfx async dma engines
  *
@@ -596,6 +578,9 @@
 		vi_srbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 
+		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
+		       adev->gfx.config.gb_addr_config & 0x70);
+
 		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 
 		/* Set ring buffer size in dwords */
@@ -788,7 +773,7 @@
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 
-	r = amdgpu_ring_lock(ring, 5);
+	r = amdgpu_ring_alloc(ring, 5);
 	if (r) {
 		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 		amdgpu_wb_free(adev, index);
@@ -801,7 +786,7 @@
 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -851,7 +836,7 @@
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(ring, NULL, 256, &ib);
+	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		goto err0;
@@ -868,9 +853,7 @@
 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
-						 AMDGPU_FENCE_OWNER_UNDEFINED,
-						 &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err1;
 
@@ -895,7 +878,8 @@
 	}
 err1:
 	fence_put(f);
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, &ib, NULL);
+	fence_put(f);
 err0:
 	amdgpu_wb_free(adev, index);
 	return r;
@@ -948,7 +932,7 @@
  * Update PTEs by writing them manually using sDMA (CIK).
  */
 static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
-				   uint64_t pe,
+				   const dma_addr_t *pages_addr, uint64_t pe,
 				   uint64_t addr, unsigned count,
 				   uint32_t incr, uint32_t flags)
 {
@@ -967,14 +951,7 @@
 		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 		ib->ptr[ib->length_dw++] = ndw;
 		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-			if (flags & AMDGPU_PTE_SYSTEM) {
-				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
-			} else if (flags & AMDGPU_PTE_VALID) {
-				value = addr;
-			} else {
-				value = 0;
-			}
+			value = amdgpu_vm_map_gart(pages_addr, addr);
 			addr += incr;
 			value |= flags;
 			ib->ptr[ib->length_dw++] = value;
@@ -1032,14 +1009,14 @@
 }
 
 /**
- * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw
+ * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
  *
  * @ib: indirect buffer to fill with padding
  *
  */
-static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
+static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	u32 pad_count;
 	int i;
 
@@ -1055,6 +1032,31 @@
 }
 
 /**
+ * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (CIK).
+ */
+static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	/* wait for idle */
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq); /* reference */
+	amdgpu_ring_write(ring, 0xfffffff); /* mask */
+	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+}
+
+/**
  * sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA
  *
  * @ring: amdgpu_ring pointer
@@ -1275,6 +1277,8 @@
 			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
 		dev_info(adev->dev, "  SDMA%d_GFX_DOORBELL=0x%08X\n",
 			 i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_TILING_CONFIG=0x%08X\n",
+			 i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
 		mutex_lock(&adev->srbm_mutex);
 		for (j = 0; j < 16; j++) {
 			vi_srbm_select(adev, 0, 0, 0, j);
@@ -1570,12 +1574,14 @@
 	.parse_cs = NULL,
 	.emit_ib = sdma_v3_0_ring_emit_ib,
 	.emit_fence = sdma_v3_0_ring_emit_fence,
-	.emit_semaphore = sdma_v3_0_ring_emit_semaphore,
+	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
 	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
 	.test_ring = sdma_v3_0_ring_test_ring,
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.insert_nop = sdma_v3_0_ring_insert_nop,
+	.pad_ib = sdma_v3_0_ring_pad_ib,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1673,14 +1679,18 @@
 	.copy_pte = sdma_v3_0_vm_copy_pte,
 	.write_pte = sdma_v3_0_vm_write_pte,
 	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
-	.pad_ib = sdma_v3_0_vm_pad_ib,
 };
 
 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
+	unsigned i;
+
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
-		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
-		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			adev->vm_manager.vm_pte_rings[i] =
+				&adev->sdma.instance[i].ring;
+
+		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
 	}
 }
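
Both SDMA pipeline-sync implementations emit a POLL_REGMEM packet with
FUNC(3) ("equal") against the ring's own fence location, so the engine
stalls until the last synced sequence number lands in memory; the new
emit_hdp_invalidate hooks likewise poke HDP_DEBUG0 from the ring now
that FLUSH_INVALIDATE_CACHE is cleared in the GMC code. The CPU-side
equivalent of what the sync packet asks the hardware to do, sketched
with illustrative names:

	#include <stdint.h>

	static void pipeline_sync_sw(volatile uint32_t *fence_addr,
				     uint32_t seq)
	{
		/* packet mask is 0xfffffff, so compare 28 bits */
		while ((*fence_addr & 0xfffffff) != (seq & 0xfffffff))
			;	/* hw retries 0xfff times, 4-cycle interval */
	}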
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 361c49a..083893d 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -272,6 +272,12 @@
 	if (!adev->pm.fw)
 		return -EINVAL;
 
+	/* Skip SMC ucode loading on SR-IOV capable boards.
+	 * vbios does this for us in asic_init in that case.
+	 */
+	if (adev->virtualization.supports_sr_iov)
+		return 0;
+
 	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
 	amdgpu_ucode_print_smc_hdr(&hdr->header);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index fbd3767..c606ccb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -164,7 +164,7 @@
 		goto done;
 	}
 
-	r = amdgpu_ring_lock(ring, 10);
+	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
 		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
 		goto done;
@@ -189,7 +189,7 @@
 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
 	amdgpu_ring_write(ring, 3);
 
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 done:
 	/* lower clocks again */
@@ -439,33 +439,6 @@
 }
 
 /**
- * uvd_v4_2_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v4_2_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
-	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
-	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
-	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
-	return true;
-}
-
-/**
  * uvd_v4_2_ring_test_ring - register write test
  *
  * @ring: amdgpu_ring pointer
@@ -480,7 +453,7 @@
 	int r;
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
-	r = amdgpu_ring_lock(ring, 3);
+	r = amdgpu_ring_alloc(ring, 3);
 	if (r) {
 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
 			  ring->idx, r);
@@ -488,7 +461,7 @@
 	}
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(mmUVD_CONTEXT_ID);
 		if (tmp == 0xDEADBEEF)
@@ -549,7 +522,7 @@
 		goto error;
 	}
 
-	r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
 		goto error;
@@ -603,6 +576,10 @@
 	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
 	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
 
+	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
 	uvd_v4_2_init_cg(adev);
 }
 
@@ -804,6 +781,13 @@
 		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
 	dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
 		 RREG32(mmUVD_CONTEXT_ID));
+	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
+	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
 }
 
 static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
@@ -888,10 +872,10 @@
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
 	.emit_ib = uvd_v4_2_ring_emit_ib,
 	.emit_fence = uvd_v4_2_ring_emit_fence,
-	.emit_semaphore = uvd_v4_2_ring_emit_semaphore,
 	.test_ring = uvd_v4_2_ring_test_ring,
 	.test_ib = uvd_v4_2_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
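
The amdgpu_ring_lock/amdgpu_ring_unlock_commit pair is renamed to
amdgpu_ring_alloc/amdgpu_ring_commit throughout, reflecting that ring
submission here no longer takes a per-ring lock. The calling pattern is
unchanged; as a sketch, with error handling trimmed:

	r = amdgpu_ring_alloc(ring, ndw);	/* reserve ndw dwords */
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(reg, 0));
	amdgpu_ring_write(ring, value);
	amdgpu_ring_commit(ring);		/* publish the new wptr */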
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 57f1c5b..e3c852d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -160,7 +160,7 @@
 		goto done;
 	}
 
-	r = amdgpu_ring_lock(ring, 10);
+	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
 		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
 		goto done;
@@ -185,7 +185,7 @@
 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
 	amdgpu_ring_write(ring, 3);
 
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 done:
 	/* lower clocks again */
@@ -279,6 +279,10 @@
 	size = AMDGPU_UVD_HEAP_SIZE;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+
+	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 }
 
 /**
@@ -483,33 +487,6 @@
 }
 
 /**
- * uvd_v5_0_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
-	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
-	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
-	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
-	return true;
-}
-
-/**
  * uvd_v5_0_ring_test_ring - register write test
  *
  * @ring: amdgpu_ring pointer
@@ -524,7 +501,7 @@
 	int r;
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
-	r = amdgpu_ring_lock(ring, 3);
+	r = amdgpu_ring_alloc(ring, 3);
 	if (r) {
 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
 			  ring->idx, r);
@@ -532,7 +509,7 @@
 	}
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(mmUVD_CONTEXT_ID);
 		if (tmp == 0xDEADBEEF)
@@ -595,7 +572,7 @@
 		goto error;
 	}
 
-	r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
 		goto error;
@@ -751,6 +728,12 @@
 		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
 	dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
 		 RREG32(mmUVD_CONTEXT_ID));
+	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
+	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
 }
 
 static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -829,10 +812,10 @@
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
 	.emit_ib = uvd_v5_0_ring_emit_ib,
 	.emit_fence = uvd_v5_0_ring_emit_fence,
-	.emit_semaphore = uvd_v5_0_ring_emit_semaphore,
 	.test_ring = uvd_v5_0_ring_test_ring,
 	.test_ib = uvd_v5_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
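
amdgpu_uvd_get_destroy_msg() gained a third argument in this series;
the boolean selects direct submission, which the IB tests here use to
bypass the GPU scheduler. Assumed prototype, for reference:

	int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring,
				       uint32_t handle, bool direct,
				       struct fence **fence);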
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 0b365b7..3375e614 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -157,7 +157,7 @@
 		goto done;
 	}
 
-	r = amdgpu_ring_lock(ring, 10);
+	r = amdgpu_ring_alloc(ring, 10);
 	if (r) {
 		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
 		goto done;
@@ -182,7 +182,7 @@
 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
 	amdgpu_ring_write(ring, 3);
 
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 
 done:
 	if (!r)
@@ -277,6 +277,10 @@
 	size = AMDGPU_UVD_HEAP_SIZE;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+
+	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 }
 
 static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
@@ -722,33 +726,6 @@
 }
 
 /**
- * uvd_v6_0_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
-	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
-	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
-	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
-	return true;
-}
-
-/**
  * uvd_v6_0_ring_test_ring - register write test
  *
  * @ring: amdgpu_ring pointer
@@ -763,7 +740,7 @@
 	int r;
 
 	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
-	r = amdgpu_ring_lock(ring, 3);
+	r = amdgpu_ring_alloc(ring, 3);
 	if (r) {
 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
 			  ring->idx, r);
@@ -771,7 +748,7 @@
 	}
 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
-	amdgpu_ring_unlock_commit(ring);
+	amdgpu_ring_commit(ring);
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(mmUVD_CONTEXT_ID);
 		if (tmp == 0xDEADBEEF)
@@ -827,7 +804,7 @@
 		goto error;
 	}
 
-	r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
 		goto error;
@@ -974,6 +951,12 @@
 		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
 	dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
 		 RREG32(mmUVD_CONTEXT_ID));
+	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
+	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
 }
 
 static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1065,10 +1048,10 @@
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
-	.emit_semaphore = uvd_v6_0_ring_emit_semaphore,
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = uvd_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index a822eda..c7e885b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -642,10 +642,10 @@
 	.parse_cs = amdgpu_vce_ring_parse_cs,
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
-	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d662fa9..ce468ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -762,10 +762,10 @@
 	.parse_cs = amdgpu_vce_ring_parse_cs,
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
-	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
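
With .emit_semaphore gone, every ring funcs table now supplies a
.pad_ib hook instead; most blocks use amdgpu_ring_generic_pad_ib, which
fills the tail of an IB with the ring's NOP up to its alignment. The
core of that helper, as a standalone sketch (align_mask and nop are
per-ring properties):

	#include <stdint.h>

	static void pad_ib(uint32_t *ib, unsigned *length_dw,
			   uint32_t align_mask, uint32_t nop)
	{
		while (*length_dw & align_mask)
			ib[(*length_dw)++] = nop;	/* pad to alignment */
	}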
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 0d14d10..1c120ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -74,6 +74,9 @@
 #include "uvd_v6_0.h"
 #include "vce_v3_0.h"
 #include "amdgpu_powerplay.h"
+#if defined(CONFIG_DRM_AMD_ACP)
+#include "amdgpu_acp.h"
+#endif
 
 /*
  * Indirect registers accessor
@@ -571,374 +574,12 @@
 	return -EINVAL;
 }
 
-static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
-{
-	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
-		RREG32(mmGRBM_STATUS));
-	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
-		RREG32(mmGRBM_STATUS2));
-	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE0));
-	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE1));
-	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE2));
-	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
-		RREG32(mmGRBM_STATUS_SE3));
-	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
-		RREG32(mmSRBM_STATUS));
-	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
-		RREG32(mmSRBM_STATUS2));
-	dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
-		RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
-	if (adev->sdma.num_instances > 1) {
-		dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
-			RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
-	}
-	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
-	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT2));
-	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
-		 RREG32(mmCP_STALLED_STAT3));
-	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
-		 RREG32(mmCP_CPF_BUSY_STAT));
-	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_CPF_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
-	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
-	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
-		 RREG32(mmCP_CPC_STALLED_STAT1));
-	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
-}
-
-/**
- * vi_gpu_check_soft_reset - check which blocks are busy
- *
- * @adev: amdgpu_device pointer
- *
- * Check which blocks are busy and return the relevant reset
- * mask to be used by vi_gpu_soft_reset().
- * Returns a mask of the blocks to be reset.
- */
-u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
-{
-	u32 reset_mask = 0;
-	u32 tmp;
-
-	/* GRBM_STATUS */
-	tmp = RREG32(mmGRBM_STATUS);
-	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
-		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
-		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
-		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
-		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
-		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
-		reset_mask |= AMDGPU_RESET_GFX;
-
-	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
-		reset_mask |= AMDGPU_RESET_CP;
-
-	/* GRBM_STATUS2 */
-	tmp = RREG32(mmGRBM_STATUS2);
-	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_RLC;
-
-	if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
-		   GRBM_STATUS2__CPC_BUSY_MASK |
-		   GRBM_STATUS2__CPG_BUSY_MASK))
-		reset_mask |= AMDGPU_RESET_CP;
-
-	/* SRBM_STATUS2 */
-	tmp = RREG32(mmSRBM_STATUS2);
-	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_DMA;
-
-	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_DMA1;
-
-	/* SRBM_STATUS */
-	tmp = RREG32(mmSRBM_STATUS);
-
-	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_IH;
-
-	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_SEM;
-
-	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
-		reset_mask |= AMDGPU_RESET_GRBM;
-
-	if (adev->asic_type != CHIP_TOPAZ) {
-		if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
-			   SRBM_STATUS__UVD_BUSY_MASK))
-			reset_mask |= AMDGPU_RESET_UVD;
-	}
-
-	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
-		reset_mask |= AMDGPU_RESET_VMC;
-
-	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
-		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
-		reset_mask |= AMDGPU_RESET_MC;
-
-	/* SDMA0_STATUS_REG */
-	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
-	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
-		reset_mask |= AMDGPU_RESET_DMA;
-
-	/* SDMA1_STATUS_REG */
-	if (adev->sdma.num_instances > 1) {
-		tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
-		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
-			reset_mask |= AMDGPU_RESET_DMA1;
-	}
-#if 0
-	/* VCE_STATUS */
-	if (adev->asic_type != CHIP_TOPAZ) {
-		tmp = RREG32(mmVCE_STATUS);
-		if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
-			reset_mask |= AMDGPU_RESET_VCE;
-		if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
-			reset_mask |= AMDGPU_RESET_VCE1;
-
-	}
-
-	if (adev->asic_type != CHIP_TOPAZ) {
-		if (amdgpu_display_is_display_hung(adev))
-			reset_mask |= AMDGPU_RESET_DISPLAY;
-	}
-#endif
-
-	/* Skip MC reset as it's mostly likely not hung, just busy */
-	if (reset_mask & AMDGPU_RESET_MC) {
-		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
-		reset_mask &= ~AMDGPU_RESET_MC;
-	}
-
-	return reset_mask;
-}
-
-/**
- * vi_gpu_soft_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- * @reset_mask: mask of which blocks to reset
- *
- * Soft reset the blocks specified in @reset_mask.
- */
-static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
-{
-	struct amdgpu_mode_mc_save save;
-	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
-	u32 tmp;
-
-	if (reset_mask == 0)
-		return;
-
-	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
-
-	vi_print_gpu_status_regs(adev);
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
-	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
-		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
-
-	/* disable CG/PG */
-
-	/* stop the rlc */
-	//XXX
-	//gfx_v8_0_rlc_stop(adev);
-
-	/* Disable GFX parsing/prefetching */
-	tmp = RREG32(mmCP_ME_CNTL);
-	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
-	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
-	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
-	WREG32(mmCP_ME_CNTL, tmp);
-
-	/* Disable MEC parsing/prefetching */
-	tmp = RREG32(mmCP_MEC_CNTL);
-	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
-	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
-	WREG32(mmCP_MEC_CNTL, tmp);
-
-	if (reset_mask & AMDGPU_RESET_DMA) {
-		/* sdma0 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
-		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-	}
-	if (reset_mask & AMDGPU_RESET_DMA1) {
-		/* sdma1 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
-		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-	}
-
-	gmc_v8_0_mc_stop(adev, &save);
-	if (amdgpu_asic_wait_for_mc_idle(adev)) {
-		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
-	}
-
-	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
-		grbm_soft_reset =
-			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
-		grbm_soft_reset =
-			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
-	}
-
-	if (reset_mask & AMDGPU_RESET_CP) {
-		grbm_soft_reset =
-			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
-	}
-
-	if (reset_mask & AMDGPU_RESET_DMA)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);
-
-	if (reset_mask & AMDGPU_RESET_DMA1)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);
-
-	if (reset_mask & AMDGPU_RESET_DISPLAY)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);
-
-	if (reset_mask & AMDGPU_RESET_RLC)
-		grbm_soft_reset =
-			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
-
-	if (reset_mask & AMDGPU_RESET_SEM)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
-
-	if (reset_mask & AMDGPU_RESET_IH)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);
-
-	if (reset_mask & AMDGPU_RESET_GRBM)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
-
-	if (reset_mask & AMDGPU_RESET_VMC)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
-
-	if (reset_mask & AMDGPU_RESET_UVD)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
-
-	if (reset_mask & AMDGPU_RESET_VCE)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
-
-	if (reset_mask & AMDGPU_RESET_VCE)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
-
-	if (!(adev->flags & AMD_IS_APU)) {
-		if (reset_mask & AMDGPU_RESET_MC)
-		srbm_soft_reset =
-			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
-	}
-
-	if (grbm_soft_reset) {
-		tmp = RREG32(mmGRBM_SOFT_RESET);
-		tmp |= grbm_soft_reset;
-		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
-		WREG32(mmGRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmGRBM_SOFT_RESET);
-
-		udelay(50);
-
-		tmp &= ~grbm_soft_reset;
-		WREG32(mmGRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmGRBM_SOFT_RESET);
-	}
-
-	if (srbm_soft_reset) {
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-		tmp |= srbm_soft_reset;
-		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
-		WREG32(mmSRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-
-		udelay(50);
-
-		tmp &= ~srbm_soft_reset;
-		WREG32(mmSRBM_SOFT_RESET, tmp);
-		tmp = RREG32(mmSRBM_SOFT_RESET);
-	}
-
-	/* Wait a little for things to settle down */
-	udelay(50);
-
-	gmc_v8_0_mc_resume(adev, &save);
-	udelay(50);
-
-	vi_print_gpu_status_regs(adev);
-}
-
 static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 {
-	struct amdgpu_mode_mc_save save;
-	u32 tmp, i;
+	u32 i;
 
 	dev_info(adev->dev, "GPU pci config reset\n");
 
-	/* disable dpm? */
-
-	/* disable cg/pg */
-
-	/* Disable GFX parsing/prefetching */
-	tmp = RREG32(mmCP_ME_CNTL);
-	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
-	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
-	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
-	WREG32(mmCP_ME_CNTL, tmp);
-
-	/* Disable MEC parsing/prefetching */
-	tmp = RREG32(mmCP_MEC_CNTL);
-	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
-	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
-	WREG32(mmCP_MEC_CNTL, tmp);
-
-	/* Disable GFX parsing/prefetching */
-	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
-		CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
-
-	/* Disable MEC parsing/prefetching */
-	WREG32(mmCP_MEC_CNTL,
-			CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
-
-	/* sdma0 */
-	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
-	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-
-	/* sdma1 */
-	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
-	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-
-	/* XXX other engines? */
-
-	/* halt the rlc, disable cp internal ints */
-	//XXX
-	//gfx_v8_0_rlc_stop(adev);
-
-	udelay(50);
-
-	/* disable mem access */
-	gmc_v8_0_mc_stop(adev, &save);
-	if (amdgpu_asic_wait_for_mc_idle(adev)) {
-		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
-	}
-
 	/* disable BM */
 	pci_clear_master(adev->pdev);
 	/* reset */
@@ -978,26 +619,11 @@
  */
 static int vi_asic_reset(struct amdgpu_device *adev)
 {
-	u32 reset_mask;
+	vi_set_bios_scratch_engine_hung(adev, true);
 
-	reset_mask = vi_gpu_check_soft_reset(adev);
+	vi_gpu_pci_config_reset(adev);
 
-	if (reset_mask)
-		vi_set_bios_scratch_engine_hung(adev, true);
-
-	/* try soft reset */
-	vi_gpu_soft_reset(adev, reset_mask);
-
-	reset_mask = vi_gpu_check_soft_reset(adev);
-
-	/* try pci config reset */
-	if (reset_mask && amdgpu_hard_reset)
-		vi_gpu_pci_config_reset(adev);
-
-	reset_mask = vi_gpu_check_soft_reset(adev);
-
-	if (!reset_mask)
-		vi_set_bios_scratch_engine_hung(adev, false);
+	vi_set_bios_scratch_engine_hung(adev, false);
 
 	return 0;
 }
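With the soft-reset machinery deleted above, vi_asic_reset() now unconditionally brackets a PCI config reset between setting and clearing the BIOS scratch engine-hung flag. The tail of vi_gpu_pci_config_reset() falls outside this hunk; a hypothetical sketch of the usual VI recovery wait, assuming the common pattern of polling mmCONFIG_MEMSIZE until MMIO responds again:

	/* Hypothetical sketch (not the hunk's code): trigger the reset
	 * through PCI config space, then poll until the memory
	 * controller answers with a sane value, i.e. the reset took.
	 */
	amdgpu_pci_config_reset(adev);
	udelay(100);
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}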
@@ -1347,6 +973,15 @@
 		.rev = 0,
 		.funcs = &vce_v3_0_ip_funcs,
 	},
+#if defined(CONFIG_DRM_AMD_ACP)
+	{
+		.type = AMD_IP_BLOCK_TYPE_ACP,
+		.major = 2,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &acp_ip_funcs,
+	},
+#endif
 };
 
 int vi_set_ip_blocks(struct amdgpu_device *adev)
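The new ACP entry rides the existing IP-block machinery: the block is only compiled into the table under CONFIG_DRM_AMD_ACP, and the core then drives it through the same amd_ip_funcs vtable as every other engine. A sketch of the vtable shape such a block exports, with hypothetical callback names standing in for the real amdgpu_acp.c symbols:

	/* Sketch: an IP block's amd_ip_funcs slots; the callback names
	 * here are placeholders, not the actual amdgpu_acp.c symbols.
	 */
	static const struct amd_ip_funcs acp_ip_funcs_sketch = {
		.early_init = acp_early_init_sketch,
		.sw_init = acp_sw_init_sketch,
		.hw_init = acp_hw_init_sketch,
		.hw_fini = acp_hw_fini_sketch,
		.suspend = acp_suspend_sketch,
		.resume = acp_resume_sketch,
	};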
@@ -1436,26 +1071,22 @@
 	adev->external_rev_id = 0xFF;
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
-		adev->has_uvd = false;
 		adev->cg_flags = 0;
 		adev->pg_flags = 0;
 		adev->external_rev_id = 0x1;
 		break;
 	case CHIP_FIJI:
-		adev->has_uvd = true;
 		adev->cg_flags = 0;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x3c;
 		break;
 	case CHIP_TONGA:
-		adev->has_uvd = true;
 		adev->cg_flags = 0;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x14;
 		break;
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-		adev->has_uvd = true;
 		adev->cg_flags = 0;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x1;
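Dropping the per-ASIC has_uvd flag is consistent with the IP-block tables above: whether UVD exists is already encoded by whether a UVD entry appears in the ASIC's block list. A hypothetical sketch of the equivalent table walk (the adev field names here are assumptions, not lifted from this hunk):

	/* Hypothetical sketch: derive UVD presence from the IP-block
	 * table instead of a cached per-ASIC bool.
	 */
	static bool asic_has_uvd(struct amdgpu_device *adev)
	{
		int i;

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD)
				return true;
		return false;
	}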
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index d98aa9d..ace4997 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -71,8 +71,6 @@
 #define		VMID(x)						((x) << 4)
 #define		QUEUEID(x)					((x) << 8)
 
-#define RB_BITMAP_WIDTH_PER_SH     2
-
 #define MC_SEQ_MISC0__MT__MASK	0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
 #define MC_SEQ_MISC0__MT__DDR2   0x20000000
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index c34c393..d5e19b5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -513,7 +513,7 @@
 				union SQ_CMD_BITS *in_reg_sq_cmd,
 				union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
 {
-	int status;
+	int status = 0;
 	union SQ_CMD_BITS reg_sq_cmd;
 	union GRBM_GFX_INDEX_BITS reg_gfx_index;
 	struct HsaDbgWaveMsgAMDGen2 *pMsg;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index ca8410e..850a562 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -59,18 +59,23 @@
 MODULE_PARM_DESC(send_sigterm,
 	"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
 
-bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
+static int amdkfd_init_completed;
+
+int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
 {
+	if (!amdkfd_init_completed)
+		return -EPROBE_DEFER;
+
 	/*
 	 * Only one interface version is supported,
 	 * no kfd/kgd version skew allowed.
 	 */
 	if (interface_version != KFD_INTERFACE_VERSION)
-		return false;
+		return -EINVAL;
 
 	*g2f = &kgd2kfd;
 
-	return true;
+	return 0;
 }
 EXPORT_SYMBOL(kgd2kfd_init);
 
@@ -111,6 +116,8 @@
 
 	kfd_process_create_wq();
 
+	amdkfd_init_completed = 1;
+
 	dev_info(kfd_device, "Initialized module\n");
 
 	return 0;
@@ -125,6 +132,8 @@
 
 static void __exit kfd_module_exit(void)
 {
+	amdkfd_init_completed = 0;
+
 	kfd_process_destroy_wq();
 	kfd_topology_shutdown();
 	kfd_chardev_exit();
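Taken together, the kfd_module.c hunks turn kgd2kfd_init() from a bool handshake into an errno-carrying one: callers that race ahead of kfd_module_init() now see -EPROBE_DEFER (and can be re-probed once amdkfd_init_completed is set), while an interface-version skew reads as -EINVAL. A caller-side sketch of how a kgd driver might consume the new return (error handling hypothetical):

	/* Hypothetical caller sketch: pass -EPROBE_DEFER up so the
	 * driver core retries the probe after amdkfd finishes loading,
	 * rather than silently running without KFD forever.
	 */
	const struct kgd2kfd_calls *kgd2kfd;
	int r = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (r == -EPROBE_DEFER)
		return r;	/* probe will be retried later */
	if (r)
		kgd2kfd = NULL;	/* version skew: run without KFD */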
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 496360e..50e8933 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -340,6 +340,8 @@
 #       define ATPX_FIXED_NOT_SUPPORTED                            (1 << 9)
 #       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED               (1 << 10)
 #       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS                    (1 << 11)
+#       define ATPX_DGPU_CAN_DRIVE_DISPLAYS                        (1 << 12)
+#       define ATPX_MS_HYBRID_GFX_SUPPORTED                        (1 << 14)
 #define ATPX_FUNCTION_POWER_CONTROL                                0x2
 /* ARG0: ATPX_FUNCTION_POWER_CONTROL
  * ARG1:
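The two new capability bits extend the existing ATPX supported-functions mask (note the source skips bit 13). A hypothetical usage sketch, with the mask variable and handler names invented for illustration:

	/* Hypothetical sketch: the verb-support word is tested bit by
	 * bit, exactly like the pre-existing ATPX_* flags above.
	 */
	if (supported_functions & ATPX_MS_HYBRID_GFX_SUPPORTED)
		handle_ms_hybrid_gfx();	/* placeholder handler */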
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index dbf7e64..04e40906 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -73,6 +73,7 @@
 	AMD_IP_BLOCK_TYPE_SDMA,
 	AMD_IP_BLOCK_TYPE_UVD,
 	AMD_IP_BLOCK_TYPE_VCE,
+	AMD_IP_BLOCK_TYPE_ACP,
 };
 
 enum amd_clockgating_state {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
index dc52ea0..d3ccf5a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
@@ -1379,6 +1379,7 @@
 #define mmDC_GPIO_PAD_STRENGTH_1                                                0x1978
 #define mmDC_GPIO_PAD_STRENGTH_2                                                0x1979
 #define mmPHY_AUX_CNTL                                                          0x197f
+#define mmDC_GPIO_I2CPAD_MASK                                                   0x1974
 #define mmDC_GPIO_I2CPAD_A                                                      0x1975
 #define mmDC_GPIO_I2CPAD_EN                                                     0x1976
 #define mmDC_GPIO_I2CPAD_Y                                                      0x1977
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h
new file mode 100644
index 0000000..6bea30e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h
@@ -0,0 +1,1117 @@
+/*
+ * DCE_8_0 Register documentation
+ *
+ * Copyright (C) 2016  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_8_0_ENUM_H
+#define DCE_8_0_ENUM_H
+
+typedef enum SurfaceEndian {
+	ENDIAN_NONE                                      = 0x0,
+	ENDIAN_8IN16                                     = 0x1,
+	ENDIAN_8IN32                                     = 0x2,
+	ENDIAN_8IN64                                     = 0x3,
+} SurfaceEndian;
+typedef enum ArrayMode {
+	ARRAY_LINEAR_GENERAL                             = 0x0,
+	ARRAY_LINEAR_ALIGNED                             = 0x1,
+	ARRAY_1D_TILED_THIN1                             = 0x2,
+	ARRAY_1D_TILED_THICK                             = 0x3,
+	ARRAY_2D_TILED_THIN1                             = 0x4,
+	ARRAY_PRT_TILED_THIN1                            = 0x5,
+	ARRAY_PRT_2D_TILED_THIN1                         = 0x6,
+	ARRAY_2D_TILED_THICK                             = 0x7,
+	ARRAY_2D_TILED_XTHICK                            = 0x8,
+	ARRAY_PRT_TILED_THICK                            = 0x9,
+	ARRAY_PRT_2D_TILED_THICK                         = 0xa,
+	ARRAY_PRT_3D_TILED_THIN1                         = 0xb,
+	ARRAY_3D_TILED_THIN1                             = 0xc,
+	ARRAY_3D_TILED_THICK                             = 0xd,
+	ARRAY_3D_TILED_XTHICK                            = 0xe,
+	ARRAY_PRT_3D_TILED_THICK                         = 0xf,
+} ArrayMode;
+typedef enum PipeTiling {
+	CONFIG_1_PIPE                                    = 0x0,
+	CONFIG_2_PIPE                                    = 0x1,
+	CONFIG_4_PIPE                                    = 0x2,
+	CONFIG_8_PIPE                                    = 0x3,
+} PipeTiling;
+typedef enum BankTiling {
+	CONFIG_4_BANK                                    = 0x0,
+	CONFIG_8_BANK                                    = 0x1,
+} BankTiling;
+typedef enum GroupInterleave {
+	CONFIG_256B_GROUP                                = 0x0,
+	CONFIG_512B_GROUP                                = 0x1,
+} GroupInterleave;
+typedef enum RowTiling {
+	CONFIG_1KB_ROW                                   = 0x0,
+	CONFIG_2KB_ROW                                   = 0x1,
+	CONFIG_4KB_ROW                                   = 0x2,
+	CONFIG_8KB_ROW                                   = 0x3,
+	CONFIG_1KB_ROW_OPT                               = 0x4,
+	CONFIG_2KB_ROW_OPT                               = 0x5,
+	CONFIG_4KB_ROW_OPT                               = 0x6,
+	CONFIG_8KB_ROW_OPT                               = 0x7,
+} RowTiling;
+typedef enum BankSwapBytes {
+	CONFIG_128B_SWAPS                                = 0x0,
+	CONFIG_256B_SWAPS                                = 0x1,
+	CONFIG_512B_SWAPS                                = 0x2,
+	CONFIG_1KB_SWAPS                                 = 0x3,
+} BankSwapBytes;
+typedef enum SampleSplitBytes {
+	CONFIG_1KB_SPLIT                                 = 0x0,
+	CONFIG_2KB_SPLIT                                 = 0x1,
+	CONFIG_4KB_SPLIT                                 = 0x2,
+	CONFIG_8KB_SPLIT                                 = 0x3,
+} SampleSplitBytes;
+typedef enum NumPipes {
+	ADDR_CONFIG_1_PIPE                               = 0x0,
+	ADDR_CONFIG_2_PIPE                               = 0x1,
+	ADDR_CONFIG_4_PIPE                               = 0x2,
+	ADDR_CONFIG_8_PIPE                               = 0x3,
+} NumPipes;
+typedef enum PipeInterleaveSize {
+	ADDR_CONFIG_PIPE_INTERLEAVE_256B                 = 0x0,
+	ADDR_CONFIG_PIPE_INTERLEAVE_512B                 = 0x1,
+} PipeInterleaveSize;
+typedef enum BankInterleaveSize {
+	ADDR_CONFIG_BANK_INTERLEAVE_1                    = 0x0,
+	ADDR_CONFIG_BANK_INTERLEAVE_2                    = 0x1,
+	ADDR_CONFIG_BANK_INTERLEAVE_4                    = 0x2,
+	ADDR_CONFIG_BANK_INTERLEAVE_8                    = 0x3,
+} BankInterleaveSize;
+typedef enum NumShaderEngines {
+	ADDR_CONFIG_1_SHADER_ENGINE                      = 0x0,
+	ADDR_CONFIG_2_SHADER_ENGINE                      = 0x1,
+} NumShaderEngines;
+typedef enum ShaderEngineTileSize {
+	ADDR_CONFIG_SE_TILE_16                           = 0x0,
+	ADDR_CONFIG_SE_TILE_32                           = 0x1,
+} ShaderEngineTileSize;
+typedef enum NumGPUs {
+	ADDR_CONFIG_1_GPU                                = 0x0,
+	ADDR_CONFIG_2_GPU                                = 0x1,
+	ADDR_CONFIG_4_GPU                                = 0x2,
+} NumGPUs;
+typedef enum MultiGPUTileSize {
+	ADDR_CONFIG_GPU_TILE_16                          = 0x0,
+	ADDR_CONFIG_GPU_TILE_32                          = 0x1,
+	ADDR_CONFIG_GPU_TILE_64                          = 0x2,
+	ADDR_CONFIG_GPU_TILE_128                         = 0x3,
+} MultiGPUTileSize;
+typedef enum RowSize {
+	ADDR_CONFIG_1KB_ROW                              = 0x0,
+	ADDR_CONFIG_2KB_ROW                              = 0x1,
+	ADDR_CONFIG_4KB_ROW                              = 0x2,
+} RowSize;
+typedef enum NumLowerPipes {
+	ADDR_CONFIG_1_LOWER_PIPES                        = 0x0,
+	ADDR_CONFIG_2_LOWER_PIPES                        = 0x1,
+} NumLowerPipes;
+typedef enum DebugBlockId {
+	DBG_CLIENT_BLKID_RESERVED                        = 0x0,
+	DBG_CLIENT_BLKID_dbg                             = 0x1,
+	DBG_CLIENT_BLKID_uvdu_0                          = 0x2,
+	DBG_CLIENT_BLKID_uvdu_1                          = 0x3,
+	DBG_CLIENT_BLKID_uvdu_2                          = 0x4,
+	DBG_CLIENT_BLKID_uvdu_3                          = 0x5,
+	DBG_CLIENT_BLKID_uvdu_4                          = 0x6,
+	DBG_CLIENT_BLKID_uvdu_5                          = 0x7,
+	DBG_CLIENT_BLKID_uvdu_6                          = 0x8,
+	DBG_CLIENT_BLKID_uvdm_0                          = 0x9,
+	DBG_CLIENT_BLKID_uvdm_1                          = 0xa,
+	DBG_CLIENT_BLKID_uvdm_2                          = 0xb,
+	DBG_CLIENT_BLKID_uvdm_3                          = 0xc,
+	DBG_CLIENT_BLKID_vcea_0                          = 0xd,
+	DBG_CLIENT_BLKID_vcea_1                          = 0xe,
+	DBG_CLIENT_BLKID_vcea_2                          = 0xf,
+	DBG_CLIENT_BLKID_vcea_3                          = 0x10,
+	DBG_CLIENT_BLKID_vcea_4                          = 0x11,
+	DBG_CLIENT_BLKID_vcea_5                          = 0x12,
+	DBG_CLIENT_BLKID_vcea_6                          = 0x13,
+	DBG_CLIENT_BLKID_vceb_0                          = 0x14,
+	DBG_CLIENT_BLKID_vceb_1                          = 0x15,
+	DBG_CLIENT_BLKID_vceb_2                          = 0x16,
+	DBG_CLIENT_BLKID_dco                             = 0x17,
+	DBG_CLIENT_BLKID_xdma                            = 0x18,
+	DBG_CLIENT_BLKID_smu_0                           = 0x19,
+	DBG_CLIENT_BLKID_smu_1                           = 0x1a,
+	DBG_CLIENT_BLKID_smu_2                           = 0x1b,
+	DBG_CLIENT_BLKID_gck                             = 0x1c,
+	DBG_CLIENT_BLKID_tmonw0                          = 0x1d,
+	DBG_CLIENT_BLKID_tmonw1                          = 0x1e,
+	DBG_CLIENT_BLKID_grbm                            = 0x1f,
+	DBG_CLIENT_BLKID_rlc                             = 0x20,
+	DBG_CLIENT_BLKID_ds0                             = 0x21,
+	DBG_CLIENT_BLKID_cpg_0                           = 0x22,
+	DBG_CLIENT_BLKID_cpg_1                           = 0x23,
+	DBG_CLIENT_BLKID_cpc_0                           = 0x24,
+	DBG_CLIENT_BLKID_cpc_1                           = 0x25,
+	DBG_CLIENT_BLKID_cpf                             = 0x26,
+	DBG_CLIENT_BLKID_scf0                            = 0x27,
+	DBG_CLIENT_BLKID_scf1                            = 0x28,
+	DBG_CLIENT_BLKID_scf2                            = 0x29,
+	DBG_CLIENT_BLKID_scf3                            = 0x2a,
+	DBG_CLIENT_BLKID_pc0                             = 0x2b,
+	DBG_CLIENT_BLKID_pc1                             = 0x2c,
+	DBG_CLIENT_BLKID_pc2                             = 0x2d,
+	DBG_CLIENT_BLKID_pc3                             = 0x2e,
+	DBG_CLIENT_BLKID_vgt0                            = 0x2f,
+	DBG_CLIENT_BLKID_vgt1                            = 0x30,
+	DBG_CLIENT_BLKID_vgt2                            = 0x31,
+	DBG_CLIENT_BLKID_vgt3                            = 0x32,
+	DBG_CLIENT_BLKID_sx00                            = 0x33,
+	DBG_CLIENT_BLKID_sx10                            = 0x34,
+	DBG_CLIENT_BLKID_sx20                            = 0x35,
+	DBG_CLIENT_BLKID_sx30                            = 0x36,
+	DBG_CLIENT_BLKID_cb001                           = 0x37,
+	DBG_CLIENT_BLKID_cb200                           = 0x38,
+	DBG_CLIENT_BLKID_cb201                           = 0x39,
+	DBG_CLIENT_BLKID_cbr0                            = 0x3a,
+	DBG_CLIENT_BLKID_cb000                           = 0x3b,
+	DBG_CLIENT_BLKID_cb101                           = 0x3c,
+	DBG_CLIENT_BLKID_cb300                           = 0x3d,
+	DBG_CLIENT_BLKID_cb301                           = 0x3e,
+	DBG_CLIENT_BLKID_cbr1                            = 0x3f,
+	DBG_CLIENT_BLKID_cb100                           = 0x40,
+	DBG_CLIENT_BLKID_ia0                             = 0x41,
+	DBG_CLIENT_BLKID_ia1                             = 0x42,
+	DBG_CLIENT_BLKID_bci0                            = 0x43,
+	DBG_CLIENT_BLKID_bci1                            = 0x44,
+	DBG_CLIENT_BLKID_bci2                            = 0x45,
+	DBG_CLIENT_BLKID_bci3                            = 0x46,
+	DBG_CLIENT_BLKID_pa0                             = 0x47,
+	DBG_CLIENT_BLKID_pa1                             = 0x48,
+	DBG_CLIENT_BLKID_spim0                           = 0x49,
+	DBG_CLIENT_BLKID_spim1                           = 0x4a,
+	DBG_CLIENT_BLKID_spim2                           = 0x4b,
+	DBG_CLIENT_BLKID_spim3                           = 0x4c,
+	DBG_CLIENT_BLKID_sdma                            = 0x4d,
+	DBG_CLIENT_BLKID_ih                              = 0x4e,
+	DBG_CLIENT_BLKID_sem                             = 0x4f,
+	DBG_CLIENT_BLKID_srbm                            = 0x50,
+	DBG_CLIENT_BLKID_hdp                             = 0x51,
+	DBG_CLIENT_BLKID_acp_0                           = 0x52,
+	DBG_CLIENT_BLKID_acp_1                           = 0x53,
+	DBG_CLIENT_BLKID_sam                             = 0x54,
+	DBG_CLIENT_BLKID_mcc0                            = 0x55,
+	DBG_CLIENT_BLKID_mcc1                            = 0x56,
+	DBG_CLIENT_BLKID_mcc2                            = 0x57,
+	DBG_CLIENT_BLKID_mcc3                            = 0x58,
+	DBG_CLIENT_BLKID_mcd0                            = 0x59,
+	DBG_CLIENT_BLKID_mcd1                            = 0x5a,
+	DBG_CLIENT_BLKID_mcd2                            = 0x5b,
+	DBG_CLIENT_BLKID_mcd3                            = 0x5c,
+	DBG_CLIENT_BLKID_mcb                             = 0x5d,
+	DBG_CLIENT_BLKID_vmc                             = 0x5e,
+	DBG_CLIENT_BLKID_gmcon                           = 0x5f,
+	DBG_CLIENT_BLKID_gdc_0                           = 0x60,
+	DBG_CLIENT_BLKID_gdc_1                           = 0x61,
+	DBG_CLIENT_BLKID_gdc_2                           = 0x62,
+	DBG_CLIENT_BLKID_gdc_3                           = 0x63,
+	DBG_CLIENT_BLKID_gdc_4                           = 0x64,
+	DBG_CLIENT_BLKID_gdc_5                           = 0x65,
+	DBG_CLIENT_BLKID_gdc_6                           = 0x66,
+	DBG_CLIENT_BLKID_gdc_7                           = 0x67,
+	DBG_CLIENT_BLKID_gdc_8                           = 0x68,
+	DBG_CLIENT_BLKID_gdc_9                           = 0x69,
+	DBG_CLIENT_BLKID_gdc_10                          = 0x6a,
+	DBG_CLIENT_BLKID_gdc_11                          = 0x6b,
+	DBG_CLIENT_BLKID_gdc_12                          = 0x6c,
+	DBG_CLIENT_BLKID_gdc_13                          = 0x6d,
+	DBG_CLIENT_BLKID_gdc_14                          = 0x6e,
+	DBG_CLIENT_BLKID_gdc_15                          = 0x6f,
+	DBG_CLIENT_BLKID_gdc_16                          = 0x70,
+	DBG_CLIENT_BLKID_gdc_17                          = 0x71,
+	DBG_CLIENT_BLKID_gdc_18                          = 0x72,
+	DBG_CLIENT_BLKID_gdc_19                          = 0x73,
+	DBG_CLIENT_BLKID_gdc_20                          = 0x74,
+	DBG_CLIENT_BLKID_gdc_21                          = 0x75,
+	DBG_CLIENT_BLKID_gdc_22                          = 0x76,
+	DBG_CLIENT_BLKID_wd                              = 0x77,
+	DBG_CLIENT_BLKID_sdma_0                          = 0x78,
+	DBG_CLIENT_BLKID_sdma_1                          = 0x79,
+} DebugBlockId;
+typedef enum DebugBlockId_OLD {
+	DBG_BLOCK_ID_RESERVED                            = 0x0,
+	DBG_BLOCK_ID_DBG                                 = 0x1,
+	DBG_BLOCK_ID_VMC                                 = 0x2,
+	DBG_BLOCK_ID_PDMA                                = 0x3,
+	DBG_BLOCK_ID_CG                                  = 0x4,
+	DBG_BLOCK_ID_SRBM                                = 0x5,
+	DBG_BLOCK_ID_GRBM                                = 0x6,
+	DBG_BLOCK_ID_RLC                                 = 0x7,
+	DBG_BLOCK_ID_CSC                                 = 0x8,
+	DBG_BLOCK_ID_SEM                                 = 0x9,
+	DBG_BLOCK_ID_IH                                  = 0xa,
+	DBG_BLOCK_ID_SC                                  = 0xb,
+	DBG_BLOCK_ID_SQ                                  = 0xc,
+	DBG_BLOCK_ID_AVP                                 = 0xd,
+	DBG_BLOCK_ID_GMCON                               = 0xe,
+	DBG_BLOCK_ID_SMU                                 = 0xf,
+	DBG_BLOCK_ID_DMA0                                = 0x10,
+	DBG_BLOCK_ID_DMA1                                = 0x11,
+	DBG_BLOCK_ID_SPIM                                = 0x12,
+	DBG_BLOCK_ID_GDS                                 = 0x13,
+	DBG_BLOCK_ID_SPIS                                = 0x14,
+	DBG_BLOCK_ID_UNUSED0                             = 0x15,
+	DBG_BLOCK_ID_PA0                                 = 0x16,
+	DBG_BLOCK_ID_PA1                                 = 0x17,
+	DBG_BLOCK_ID_CP0                                 = 0x18,
+	DBG_BLOCK_ID_CP1                                 = 0x19,
+	DBG_BLOCK_ID_CP2                                 = 0x1a,
+	DBG_BLOCK_ID_UNUSED1                             = 0x1b,
+	DBG_BLOCK_ID_UVDU                                = 0x1c,
+	DBG_BLOCK_ID_UVDM                                = 0x1d,
+	DBG_BLOCK_ID_VCE                                 = 0x1e,
+	DBG_BLOCK_ID_UNUSED2                             = 0x1f,
+	DBG_BLOCK_ID_VGT0                                = 0x20,
+	DBG_BLOCK_ID_VGT1                                = 0x21,
+	DBG_BLOCK_ID_IA                                  = 0x22,
+	DBG_BLOCK_ID_UNUSED3                             = 0x23,
+	DBG_BLOCK_ID_SCT0                                = 0x24,
+	DBG_BLOCK_ID_SCT1                                = 0x25,
+	DBG_BLOCK_ID_SPM0                                = 0x26,
+	DBG_BLOCK_ID_SPM1                                = 0x27,
+	DBG_BLOCK_ID_TCAA                                = 0x28,
+	DBG_BLOCK_ID_TCAB                                = 0x29,
+	DBG_BLOCK_ID_TCCA                                = 0x2a,
+	DBG_BLOCK_ID_TCCB                                = 0x2b,
+	DBG_BLOCK_ID_MCC0                                = 0x2c,
+	DBG_BLOCK_ID_MCC1                                = 0x2d,
+	DBG_BLOCK_ID_MCC2                                = 0x2e,
+	DBG_BLOCK_ID_MCC3                                = 0x2f,
+	DBG_BLOCK_ID_SX0                                 = 0x30,
+	DBG_BLOCK_ID_SX1                                 = 0x31,
+	DBG_BLOCK_ID_SX2                                 = 0x32,
+	DBG_BLOCK_ID_SX3                                 = 0x33,
+	DBG_BLOCK_ID_UNUSED4                             = 0x34,
+	DBG_BLOCK_ID_UNUSED5                             = 0x35,
+	DBG_BLOCK_ID_UNUSED6                             = 0x36,
+	DBG_BLOCK_ID_UNUSED7                             = 0x37,
+	DBG_BLOCK_ID_PC0                                 = 0x38,
+	DBG_BLOCK_ID_PC1                                 = 0x39,
+	DBG_BLOCK_ID_UNUSED8                             = 0x3a,
+	DBG_BLOCK_ID_UNUSED9                             = 0x3b,
+	DBG_BLOCK_ID_UNUSED10                            = 0x3c,
+	DBG_BLOCK_ID_UNUSED11                            = 0x3d,
+	DBG_BLOCK_ID_MCB                                 = 0x3e,
+	DBG_BLOCK_ID_UNUSED12                            = 0x3f,
+	DBG_BLOCK_ID_SCB0                                = 0x40,
+	DBG_BLOCK_ID_SCB1                                = 0x41,
+	DBG_BLOCK_ID_UNUSED13                            = 0x42,
+	DBG_BLOCK_ID_UNUSED14                            = 0x43,
+	DBG_BLOCK_ID_SCF0                                = 0x44,
+	DBG_BLOCK_ID_SCF1                                = 0x45,
+	DBG_BLOCK_ID_UNUSED15                            = 0x46,
+	DBG_BLOCK_ID_UNUSED16                            = 0x47,
+	DBG_BLOCK_ID_BCI0                                = 0x48,
+	DBG_BLOCK_ID_BCI1                                = 0x49,
+	DBG_BLOCK_ID_BCI2                                = 0x4a,
+	DBG_BLOCK_ID_BCI3                                = 0x4b,
+	DBG_BLOCK_ID_UNUSED17                            = 0x4c,
+	DBG_BLOCK_ID_UNUSED18                            = 0x4d,
+	DBG_BLOCK_ID_UNUSED19                            = 0x4e,
+	DBG_BLOCK_ID_UNUSED20                            = 0x4f,
+	DBG_BLOCK_ID_CB00                                = 0x50,
+	DBG_BLOCK_ID_CB01                                = 0x51,
+	DBG_BLOCK_ID_CB02                                = 0x52,
+	DBG_BLOCK_ID_CB03                                = 0x53,
+	DBG_BLOCK_ID_CB04                                = 0x54,
+	DBG_BLOCK_ID_UNUSED21                            = 0x55,
+	DBG_BLOCK_ID_UNUSED22                            = 0x56,
+	DBG_BLOCK_ID_UNUSED23                            = 0x57,
+	DBG_BLOCK_ID_CB10                                = 0x58,
+	DBG_BLOCK_ID_CB11                                = 0x59,
+	DBG_BLOCK_ID_CB12                                = 0x5a,
+	DBG_BLOCK_ID_CB13                                = 0x5b,
+	DBG_BLOCK_ID_CB14                                = 0x5c,
+	DBG_BLOCK_ID_UNUSED24                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED25                            = 0x5e,
+	DBG_BLOCK_ID_UNUSED26                            = 0x5f,
+	DBG_BLOCK_ID_TCP0                                = 0x60,
+	DBG_BLOCK_ID_TCP1                                = 0x61,
+	DBG_BLOCK_ID_TCP2                                = 0x62,
+	DBG_BLOCK_ID_TCP3                                = 0x63,
+	DBG_BLOCK_ID_TCP4                                = 0x64,
+	DBG_BLOCK_ID_TCP5                                = 0x65,
+	DBG_BLOCK_ID_TCP6                                = 0x66,
+	DBG_BLOCK_ID_TCP7                                = 0x67,
+	DBG_BLOCK_ID_TCP8                                = 0x68,
+	DBG_BLOCK_ID_TCP9                                = 0x69,
+	DBG_BLOCK_ID_TCP10                               = 0x6a,
+	DBG_BLOCK_ID_TCP11                               = 0x6b,
+	DBG_BLOCK_ID_TCP12                               = 0x6c,
+	DBG_BLOCK_ID_TCP13                               = 0x6d,
+	DBG_BLOCK_ID_TCP14                               = 0x6e,
+	DBG_BLOCK_ID_TCP15                               = 0x6f,
+	DBG_BLOCK_ID_TCP16                               = 0x70,
+	DBG_BLOCK_ID_TCP17                               = 0x71,
+	DBG_BLOCK_ID_TCP18                               = 0x72,
+	DBG_BLOCK_ID_TCP19                               = 0x73,
+	DBG_BLOCK_ID_TCP20                               = 0x74,
+	DBG_BLOCK_ID_TCP21                               = 0x75,
+	DBG_BLOCK_ID_TCP22                               = 0x76,
+	DBG_BLOCK_ID_TCP23                               = 0x77,
+	DBG_BLOCK_ID_TCP_RESERVED0                       = 0x78,
+	DBG_BLOCK_ID_TCP_RESERVED1                       = 0x79,
+	DBG_BLOCK_ID_TCP_RESERVED2                       = 0x7a,
+	DBG_BLOCK_ID_TCP_RESERVED3                       = 0x7b,
+	DBG_BLOCK_ID_TCP_RESERVED4                       = 0x7c,
+	DBG_BLOCK_ID_TCP_RESERVED5                       = 0x7d,
+	DBG_BLOCK_ID_TCP_RESERVED6                       = 0x7e,
+	DBG_BLOCK_ID_TCP_RESERVED7                       = 0x7f,
+	DBG_BLOCK_ID_DB00                                = 0x80,
+	DBG_BLOCK_ID_DB01                                = 0x81,
+	DBG_BLOCK_ID_DB02                                = 0x82,
+	DBG_BLOCK_ID_DB03                                = 0x83,
+	DBG_BLOCK_ID_DB04                                = 0x84,
+	DBG_BLOCK_ID_UNUSED27                            = 0x85,
+	DBG_BLOCK_ID_UNUSED28                            = 0x86,
+	DBG_BLOCK_ID_UNUSED29                            = 0x87,
+	DBG_BLOCK_ID_DB10                                = 0x88,
+	DBG_BLOCK_ID_DB11                                = 0x89,
+	DBG_BLOCK_ID_DB12                                = 0x8a,
+	DBG_BLOCK_ID_DB13                                = 0x8b,
+	DBG_BLOCK_ID_DB14                                = 0x8c,
+	DBG_BLOCK_ID_UNUSED30                            = 0x8d,
+	DBG_BLOCK_ID_UNUSED31                            = 0x8e,
+	DBG_BLOCK_ID_UNUSED32                            = 0x8f,
+	DBG_BLOCK_ID_TCC0                                = 0x90,
+	DBG_BLOCK_ID_TCC1                                = 0x91,
+	DBG_BLOCK_ID_TCC2                                = 0x92,
+	DBG_BLOCK_ID_TCC3                                = 0x93,
+	DBG_BLOCK_ID_TCC4                                = 0x94,
+	DBG_BLOCK_ID_TCC5                                = 0x95,
+	DBG_BLOCK_ID_TCC6                                = 0x96,
+	DBG_BLOCK_ID_TCC7                                = 0x97,
+	DBG_BLOCK_ID_SPS00                               = 0x98,
+	DBG_BLOCK_ID_SPS01                               = 0x99,
+	DBG_BLOCK_ID_SPS02                               = 0x9a,
+	DBG_BLOCK_ID_SPS10                               = 0x9b,
+	DBG_BLOCK_ID_SPS11                               = 0x9c,
+	DBG_BLOCK_ID_SPS12                               = 0x9d,
+	DBG_BLOCK_ID_UNUSED33                            = 0x9e,
+	DBG_BLOCK_ID_UNUSED34                            = 0x9f,
+	DBG_BLOCK_ID_TA00                                = 0xa0,
+	DBG_BLOCK_ID_TA01                                = 0xa1,
+	DBG_BLOCK_ID_TA02                                = 0xa2,
+	DBG_BLOCK_ID_TA03                                = 0xa3,
+	DBG_BLOCK_ID_TA04                                = 0xa4,
+	DBG_BLOCK_ID_TA05                                = 0xa5,
+	DBG_BLOCK_ID_TA06                                = 0xa6,
+	DBG_BLOCK_ID_TA07                                = 0xa7,
+	DBG_BLOCK_ID_TA08                                = 0xa8,
+	DBG_BLOCK_ID_TA09                                = 0xa9,
+	DBG_BLOCK_ID_TA0A                                = 0xaa,
+	DBG_BLOCK_ID_TA0B                                = 0xab,
+	DBG_BLOCK_ID_UNUSED35                            = 0xac,
+	DBG_BLOCK_ID_UNUSED36                            = 0xad,
+	DBG_BLOCK_ID_UNUSED37                            = 0xae,
+	DBG_BLOCK_ID_UNUSED38                            = 0xaf,
+	DBG_BLOCK_ID_TA10                                = 0xb0,
+	DBG_BLOCK_ID_TA11                                = 0xb1,
+	DBG_BLOCK_ID_TA12                                = 0xb2,
+	DBG_BLOCK_ID_TA13                                = 0xb3,
+	DBG_BLOCK_ID_TA14                                = 0xb4,
+	DBG_BLOCK_ID_TA15                                = 0xb5,
+	DBG_BLOCK_ID_TA16                                = 0xb6,
+	DBG_BLOCK_ID_TA17                                = 0xb7,
+	DBG_BLOCK_ID_TA18                                = 0xb8,
+	DBG_BLOCK_ID_TA19                                = 0xb9,
+	DBG_BLOCK_ID_TA1A                                = 0xba,
+	DBG_BLOCK_ID_TA1B                                = 0xbb,
+	DBG_BLOCK_ID_UNUSED39                            = 0xbc,
+	DBG_BLOCK_ID_UNUSED40                            = 0xbd,
+	DBG_BLOCK_ID_UNUSED41                            = 0xbe,
+	DBG_BLOCK_ID_UNUSED42                            = 0xbf,
+	DBG_BLOCK_ID_TD00                                = 0xc0,
+	DBG_BLOCK_ID_TD01                                = 0xc1,
+	DBG_BLOCK_ID_TD02                                = 0xc2,
+	DBG_BLOCK_ID_TD03                                = 0xc3,
+	DBG_BLOCK_ID_TD04                                = 0xc4,
+	DBG_BLOCK_ID_TD05                                = 0xc5,
+	DBG_BLOCK_ID_TD06                                = 0xc6,
+	DBG_BLOCK_ID_TD07                                = 0xc7,
+	DBG_BLOCK_ID_TD08                                = 0xc8,
+	DBG_BLOCK_ID_TD09                                = 0xc9,
+	DBG_BLOCK_ID_TD0A                                = 0xca,
+	DBG_BLOCK_ID_TD0B                                = 0xcb,
+	DBG_BLOCK_ID_UNUSED43                            = 0xcc,
+	DBG_BLOCK_ID_UNUSED44                            = 0xcd,
+	DBG_BLOCK_ID_UNUSED45                            = 0xce,
+	DBG_BLOCK_ID_UNUSED46                            = 0xcf,
+	DBG_BLOCK_ID_TD10                                = 0xd0,
+	DBG_BLOCK_ID_TD11                                = 0xd1,
+	DBG_BLOCK_ID_TD12                                = 0xd2,
+	DBG_BLOCK_ID_TD13                                = 0xd3,
+	DBG_BLOCK_ID_TD14                                = 0xd4,
+	DBG_BLOCK_ID_TD15                                = 0xd5,
+	DBG_BLOCK_ID_TD16                                = 0xd6,
+	DBG_BLOCK_ID_TD17                                = 0xd7,
+	DBG_BLOCK_ID_TD18                                = 0xd8,
+	DBG_BLOCK_ID_TD19                                = 0xd9,
+	DBG_BLOCK_ID_TD1A                                = 0xda,
+	DBG_BLOCK_ID_TD1B                                = 0xdb,
+	DBG_BLOCK_ID_UNUSED47                            = 0xdc,
+	DBG_BLOCK_ID_UNUSED48                            = 0xdd,
+	DBG_BLOCK_ID_UNUSED49                            = 0xde,
+	DBG_BLOCK_ID_UNUSED50                            = 0xdf,
+	DBG_BLOCK_ID_MCD0                                = 0xe0,
+	DBG_BLOCK_ID_MCD1                                = 0xe1,
+	DBG_BLOCK_ID_MCD2                                = 0xe2,
+	DBG_BLOCK_ID_MCD3                                = 0xe3,
+	DBG_BLOCK_ID_MCD4                                = 0xe4,
+	DBG_BLOCK_ID_MCD5                                = 0xe5,
+	DBG_BLOCK_ID_UNUSED51                            = 0xe6,
+	DBG_BLOCK_ID_UNUSED52                            = 0xe7,
+} DebugBlockId_OLD;
+typedef enum DebugBlockId_BY2 {
+	DBG_BLOCK_ID_RESERVED_BY2                        = 0x0,
+	DBG_BLOCK_ID_VMC_BY2                             = 0x1,
+	DBG_BLOCK_ID_CG_BY2                              = 0x2,
+	DBG_BLOCK_ID_GRBM_BY2                            = 0x3,
+	DBG_BLOCK_ID_CSC_BY2                             = 0x4,
+	DBG_BLOCK_ID_IH_BY2                              = 0x5,
+	DBG_BLOCK_ID_SQ_BY2                              = 0x6,
+	DBG_BLOCK_ID_GMCON_BY2                           = 0x7,
+	DBG_BLOCK_ID_DMA0_BY2                            = 0x8,
+	DBG_BLOCK_ID_SPIM_BY2                            = 0x9,
+	DBG_BLOCK_ID_SPIS_BY2                            = 0xa,
+	DBG_BLOCK_ID_PA0_BY2                             = 0xb,
+	DBG_BLOCK_ID_CP0_BY2                             = 0xc,
+	DBG_BLOCK_ID_CP2_BY2                             = 0xd,
+	DBG_BLOCK_ID_UVDU_BY2                            = 0xe,
+	DBG_BLOCK_ID_VCE_BY2                             = 0xf,
+	DBG_BLOCK_ID_VGT0_BY2                            = 0x10,
+	DBG_BLOCK_ID_IA_BY2                              = 0x11,
+	DBG_BLOCK_ID_SCT0_BY2                            = 0x12,
+	DBG_BLOCK_ID_SPM0_BY2                            = 0x13,
+	DBG_BLOCK_ID_TCAA_BY2                            = 0x14,
+	DBG_BLOCK_ID_TCCA_BY2                            = 0x15,
+	DBG_BLOCK_ID_MCC0_BY2                            = 0x16,
+	DBG_BLOCK_ID_MCC2_BY2                            = 0x17,
+	DBG_BLOCK_ID_SX0_BY2                             = 0x18,
+	DBG_BLOCK_ID_SX2_BY2                             = 0x19,
+	DBG_BLOCK_ID_UNUSED4_BY2                         = 0x1a,
+	DBG_BLOCK_ID_UNUSED6_BY2                         = 0x1b,
+	DBG_BLOCK_ID_PC0_BY2                             = 0x1c,
+	DBG_BLOCK_ID_UNUSED8_BY2                         = 0x1d,
+	DBG_BLOCK_ID_UNUSED10_BY2                        = 0x1e,
+	DBG_BLOCK_ID_MCB_BY2                             = 0x1f,
+	DBG_BLOCK_ID_SCB0_BY2                            = 0x20,
+	DBG_BLOCK_ID_UNUSED13_BY2                        = 0x21,
+	DBG_BLOCK_ID_SCF0_BY2                            = 0x22,
+	DBG_BLOCK_ID_UNUSED15_BY2                        = 0x23,
+	DBG_BLOCK_ID_BCI0_BY2                            = 0x24,
+	DBG_BLOCK_ID_BCI2_BY2                            = 0x25,
+	DBG_BLOCK_ID_UNUSED17_BY2                        = 0x26,
+	DBG_BLOCK_ID_UNUSED19_BY2                        = 0x27,
+	DBG_BLOCK_ID_CB00_BY2                            = 0x28,
+	DBG_BLOCK_ID_CB02_BY2                            = 0x29,
+	DBG_BLOCK_ID_CB04_BY2                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED22_BY2                        = 0x2b,
+	DBG_BLOCK_ID_CB10_BY2                            = 0x2c,
+	DBG_BLOCK_ID_CB12_BY2                            = 0x2d,
+	DBG_BLOCK_ID_CB14_BY2                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED25_BY2                        = 0x2f,
+	DBG_BLOCK_ID_TCP0_BY2                            = 0x30,
+	DBG_BLOCK_ID_TCP2_BY2                            = 0x31,
+	DBG_BLOCK_ID_TCP4_BY2                            = 0x32,
+	DBG_BLOCK_ID_TCP6_BY2                            = 0x33,
+	DBG_BLOCK_ID_TCP8_BY2                            = 0x34,
+	DBG_BLOCK_ID_TCP10_BY2                           = 0x35,
+	DBG_BLOCK_ID_TCP12_BY2                           = 0x36,
+	DBG_BLOCK_ID_TCP14_BY2                           = 0x37,
+	DBG_BLOCK_ID_TCP16_BY2                           = 0x38,
+	DBG_BLOCK_ID_TCP18_BY2                           = 0x39,
+	DBG_BLOCK_ID_TCP20_BY2                           = 0x3a,
+	DBG_BLOCK_ID_TCP22_BY2                           = 0x3b,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY2                   = 0x3c,
+	DBG_BLOCK_ID_TCP_RESERVED2_BY2                   = 0x3d,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY2                   = 0x3e,
+	DBG_BLOCK_ID_TCP_RESERVED6_BY2                   = 0x3f,
+	DBG_BLOCK_ID_DB00_BY2                            = 0x40,
+	DBG_BLOCK_ID_DB02_BY2                            = 0x41,
+	DBG_BLOCK_ID_DB04_BY2                            = 0x42,
+	DBG_BLOCK_ID_UNUSED28_BY2                        = 0x43,
+	DBG_BLOCK_ID_DB10_BY2                            = 0x44,
+	DBG_BLOCK_ID_DB12_BY2                            = 0x45,
+	DBG_BLOCK_ID_DB14_BY2                            = 0x46,
+	DBG_BLOCK_ID_UNUSED31_BY2                        = 0x47,
+	DBG_BLOCK_ID_TCC0_BY2                            = 0x48,
+	DBG_BLOCK_ID_TCC2_BY2                            = 0x49,
+	DBG_BLOCK_ID_TCC4_BY2                            = 0x4a,
+	DBG_BLOCK_ID_TCC6_BY2                            = 0x4b,
+	DBG_BLOCK_ID_SPS00_BY2                           = 0x4c,
+	DBG_BLOCK_ID_SPS02_BY2                           = 0x4d,
+	DBG_BLOCK_ID_SPS11_BY2                           = 0x4e,
+	DBG_BLOCK_ID_UNUSED33_BY2                        = 0x4f,
+	DBG_BLOCK_ID_TA00_BY2                            = 0x50,
+	DBG_BLOCK_ID_TA02_BY2                            = 0x51,
+	DBG_BLOCK_ID_TA04_BY2                            = 0x52,
+	DBG_BLOCK_ID_TA06_BY2                            = 0x53,
+	DBG_BLOCK_ID_TA08_BY2                            = 0x54,
+	DBG_BLOCK_ID_TA0A_BY2                            = 0x55,
+	DBG_BLOCK_ID_UNUSED35_BY2                        = 0x56,
+	DBG_BLOCK_ID_UNUSED37_BY2                        = 0x57,
+	DBG_BLOCK_ID_TA10_BY2                            = 0x58,
+	DBG_BLOCK_ID_TA12_BY2                            = 0x59,
+	DBG_BLOCK_ID_TA14_BY2                            = 0x5a,
+	DBG_BLOCK_ID_TA16_BY2                            = 0x5b,
+	DBG_BLOCK_ID_TA18_BY2                            = 0x5c,
+	DBG_BLOCK_ID_TA1A_BY2                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED39_BY2                        = 0x5e,
+	DBG_BLOCK_ID_UNUSED41_BY2                        = 0x5f,
+	DBG_BLOCK_ID_TD00_BY2                            = 0x60,
+	DBG_BLOCK_ID_TD02_BY2                            = 0x61,
+	DBG_BLOCK_ID_TD04_BY2                            = 0x62,
+	DBG_BLOCK_ID_TD06_BY2                            = 0x63,
+	DBG_BLOCK_ID_TD08_BY2                            = 0x64,
+	DBG_BLOCK_ID_TD0A_BY2                            = 0x65,
+	DBG_BLOCK_ID_UNUSED43_BY2                        = 0x66,
+	DBG_BLOCK_ID_UNUSED45_BY2                        = 0x67,
+	DBG_BLOCK_ID_TD10_BY2                            = 0x68,
+	DBG_BLOCK_ID_TD12_BY2                            = 0x69,
+	DBG_BLOCK_ID_TD14_BY2                            = 0x6a,
+	DBG_BLOCK_ID_TD16_BY2                            = 0x6b,
+	DBG_BLOCK_ID_TD18_BY2                            = 0x6c,
+	DBG_BLOCK_ID_TD1A_BY2                            = 0x6d,
+	DBG_BLOCK_ID_UNUSED47_BY2                        = 0x6e,
+	DBG_BLOCK_ID_UNUSED49_BY2                        = 0x6f,
+	DBG_BLOCK_ID_MCD0_BY2                            = 0x70,
+	DBG_BLOCK_ID_MCD2_BY2                            = 0x71,
+	DBG_BLOCK_ID_MCD4_BY2                            = 0x72,
+	DBG_BLOCK_ID_UNUSED51_BY2                        = 0x73,
+} DebugBlockId_BY2;
+typedef enum DebugBlockId_BY4 {
+	DBG_BLOCK_ID_RESERVED_BY4                        = 0x0,
+	DBG_BLOCK_ID_CG_BY4                              = 0x1,
+	DBG_BLOCK_ID_CSC_BY4                             = 0x2,
+	DBG_BLOCK_ID_SQ_BY4                              = 0x3,
+	DBG_BLOCK_ID_DMA0_BY4                            = 0x4,
+	DBG_BLOCK_ID_SPIS_BY4                            = 0x5,
+	DBG_BLOCK_ID_CP0_BY4                             = 0x6,
+	DBG_BLOCK_ID_UVDU_BY4                            = 0x7,
+	DBG_BLOCK_ID_VGT0_BY4                            = 0x8,
+	DBG_BLOCK_ID_SCT0_BY4                            = 0x9,
+	DBG_BLOCK_ID_TCAA_BY4                            = 0xa,
+	DBG_BLOCK_ID_MCC0_BY4                            = 0xb,
+	DBG_BLOCK_ID_SX0_BY4                             = 0xc,
+	DBG_BLOCK_ID_UNUSED4_BY4                         = 0xd,
+	DBG_BLOCK_ID_PC0_BY4                             = 0xe,
+	DBG_BLOCK_ID_UNUSED10_BY4                        = 0xf,
+	DBG_BLOCK_ID_SCB0_BY4                            = 0x10,
+	DBG_BLOCK_ID_SCF0_BY4                            = 0x11,
+	DBG_BLOCK_ID_BCI0_BY4                            = 0x12,
+	DBG_BLOCK_ID_UNUSED17_BY4                        = 0x13,
+	DBG_BLOCK_ID_CB00_BY4                            = 0x14,
+	DBG_BLOCK_ID_CB04_BY4                            = 0x15,
+	DBG_BLOCK_ID_CB10_BY4                            = 0x16,
+	DBG_BLOCK_ID_CB14_BY4                            = 0x17,
+	DBG_BLOCK_ID_TCP0_BY4                            = 0x18,
+	DBG_BLOCK_ID_TCP4_BY4                            = 0x19,
+	DBG_BLOCK_ID_TCP8_BY4                            = 0x1a,
+	DBG_BLOCK_ID_TCP12_BY4                           = 0x1b,
+	DBG_BLOCK_ID_TCP16_BY4                           = 0x1c,
+	DBG_BLOCK_ID_TCP20_BY4                           = 0x1d,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY4                   = 0x1e,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY4                   = 0x1f,
+	DBG_BLOCK_ID_DB_BY4                              = 0x20,
+	DBG_BLOCK_ID_DB04_BY4                            = 0x21,
+	DBG_BLOCK_ID_DB10_BY4                            = 0x22,
+	DBG_BLOCK_ID_DB14_BY4                            = 0x23,
+	DBG_BLOCK_ID_TCC0_BY4                            = 0x24,
+	DBG_BLOCK_ID_TCC4_BY4                            = 0x25,
+	DBG_BLOCK_ID_SPS00_BY4                           = 0x26,
+	DBG_BLOCK_ID_SPS11_BY4                           = 0x27,
+	DBG_BLOCK_ID_TA00_BY4                            = 0x28,
+	DBG_BLOCK_ID_TA04_BY4                            = 0x29,
+	DBG_BLOCK_ID_TA08_BY4                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED35_BY4                        = 0x2b,
+	DBG_BLOCK_ID_TA10_BY4                            = 0x2c,
+	DBG_BLOCK_ID_TA14_BY4                            = 0x2d,
+	DBG_BLOCK_ID_TA18_BY4                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED39_BY4                        = 0x2f,
+	DBG_BLOCK_ID_TD00_BY4                            = 0x30,
+	DBG_BLOCK_ID_TD04_BY4                            = 0x31,
+	DBG_BLOCK_ID_TD08_BY4                            = 0x32,
+	DBG_BLOCK_ID_UNUSED43_BY4                        = 0x33,
+	DBG_BLOCK_ID_TD10_BY4                            = 0x34,
+	DBG_BLOCK_ID_TD14_BY4                            = 0x35,
+	DBG_BLOCK_ID_TD18_BY4                            = 0x36,
+	DBG_BLOCK_ID_UNUSED47_BY4                        = 0x37,
+	DBG_BLOCK_ID_MCD0_BY4                            = 0x38,
+	DBG_BLOCK_ID_MCD4_BY4                            = 0x39,
+} DebugBlockId_BY4;
+typedef enum DebugBlockId_BY8 {
+	DBG_BLOCK_ID_RESERVED_BY8                        = 0x0,
+	DBG_BLOCK_ID_CSC_BY8                             = 0x1,
+	DBG_BLOCK_ID_DMA0_BY8                            = 0x2,
+	DBG_BLOCK_ID_CP0_BY8                             = 0x3,
+	DBG_BLOCK_ID_VGT0_BY8                            = 0x4,
+	DBG_BLOCK_ID_TCAA_BY8                            = 0x5,
+	DBG_BLOCK_ID_SX0_BY8                             = 0x6,
+	DBG_BLOCK_ID_PC0_BY8                             = 0x7,
+	DBG_BLOCK_ID_SCB0_BY8                            = 0x8,
+	DBG_BLOCK_ID_BCI0_BY8                            = 0x9,
+	DBG_BLOCK_ID_CB00_BY8                            = 0xa,
+	DBG_BLOCK_ID_CB10_BY8                            = 0xb,
+	DBG_BLOCK_ID_TCP0_BY8                            = 0xc,
+	DBG_BLOCK_ID_TCP8_BY8                            = 0xd,
+	DBG_BLOCK_ID_TCP16_BY8                           = 0xe,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY8                   = 0xf,
+	DBG_BLOCK_ID_DB00_BY8                            = 0x10,
+	DBG_BLOCK_ID_DB10_BY8                            = 0x11,
+	DBG_BLOCK_ID_TCC0_BY8                            = 0x12,
+	DBG_BLOCK_ID_SPS00_BY8                           = 0x13,
+	DBG_BLOCK_ID_TA00_BY8                            = 0x14,
+	DBG_BLOCK_ID_TA08_BY8                            = 0x15,
+	DBG_BLOCK_ID_TA10_BY8                            = 0x16,
+	DBG_BLOCK_ID_TA18_BY8                            = 0x17,
+	DBG_BLOCK_ID_TD00_BY8                            = 0x18,
+	DBG_BLOCK_ID_TD08_BY8                            = 0x19,
+	DBG_BLOCK_ID_TD10_BY8                            = 0x1a,
+	DBG_BLOCK_ID_TD18_BY8                            = 0x1b,
+	DBG_BLOCK_ID_MCD0_BY8                            = 0x1c,
+} DebugBlockId_BY8;
+typedef enum DebugBlockId_BY16 {
+	DBG_BLOCK_ID_RESERVED_BY16                       = 0x0,
+	DBG_BLOCK_ID_DMA0_BY16                           = 0x1,
+	DBG_BLOCK_ID_VGT0_BY16                           = 0x2,
+	DBG_BLOCK_ID_SX0_BY16                            = 0x3,
+	DBG_BLOCK_ID_SCB0_BY16                           = 0x4,
+	DBG_BLOCK_ID_CB00_BY16                           = 0x5,
+	DBG_BLOCK_ID_TCP0_BY16                           = 0x6,
+	DBG_BLOCK_ID_TCP16_BY16                          = 0x7,
+	DBG_BLOCK_ID_DB00_BY16                           = 0x8,
+	DBG_BLOCK_ID_TCC0_BY16                           = 0x9,
+	DBG_BLOCK_ID_TA00_BY16                           = 0xa,
+	DBG_BLOCK_ID_TA10_BY16                           = 0xb,
+	DBG_BLOCK_ID_TD00_BY16                           = 0xc,
+	DBG_BLOCK_ID_TD10_BY16                           = 0xd,
+	DBG_BLOCK_ID_MCD0_BY16                           = 0xe,
+} DebugBlockId_BY16;
+typedef enum CompareRef {
+	REF_NEVER                                        = 0x0,
+	REF_LESS                                         = 0x1,
+	REF_EQUAL                                        = 0x2,
+	REF_LEQUAL                                       = 0x3,
+	REF_GREATER                                      = 0x4,
+	REF_NOTEQUAL                                     = 0x5,
+	REF_GEQUAL                                       = 0x6,
+	REF_ALWAYS                                       = 0x7,
+} CompareRef;
+typedef enum ReadSize {
+	READ_256_BITS                                    = 0x0,
+	READ_512_BITS                                    = 0x1,
+} ReadSize;
+typedef enum DepthFormat {
+	DEPTH_INVALID                                    = 0x0,
+	DEPTH_16                                         = 0x1,
+	DEPTH_X8_24                                      = 0x2,
+	DEPTH_8_24                                       = 0x3,
+	DEPTH_X8_24_FLOAT                                = 0x4,
+	DEPTH_8_24_FLOAT                                 = 0x5,
+	DEPTH_32_FLOAT                                   = 0x6,
+	DEPTH_X24_8_32_FLOAT                             = 0x7,
+} DepthFormat;
+typedef enum ZFormat {
+	Z_INVALID                                        = 0x0,
+	Z_16                                             = 0x1,
+	Z_24                                             = 0x2,
+	Z_32_FLOAT                                       = 0x3,
+} ZFormat;
+typedef enum StencilFormat {
+	STENCIL_INVALID                                  = 0x0,
+	STENCIL_8                                        = 0x1,
+} StencilFormat;
+typedef enum CmaskMode {
+	CMASK_CLEAR_NONE                                 = 0x0,
+	CMASK_CLEAR_ONE                                  = 0x1,
+	CMASK_CLEAR_ALL                                  = 0x2,
+	CMASK_ANY_EXPANDED                               = 0x3,
+	CMASK_ALPHA0_FRAG1                               = 0x4,
+	CMASK_ALPHA0_FRAG2                               = 0x5,
+	CMASK_ALPHA0_FRAG4                               = 0x6,
+	CMASK_ALPHA0_FRAGS                               = 0x7,
+	CMASK_ALPHA1_FRAG1                               = 0x8,
+	CMASK_ALPHA1_FRAG2                               = 0x9,
+	CMASK_ALPHA1_FRAG4                               = 0xa,
+	CMASK_ALPHA1_FRAGS                               = 0xb,
+	CMASK_ALPHAX_FRAG1                               = 0xc,
+	CMASK_ALPHAX_FRAG2                               = 0xd,
+	CMASK_ALPHAX_FRAG4                               = 0xe,
+	CMASK_ALPHAX_FRAGS                               = 0xf,
+} CmaskMode;
+typedef enum QuadExportFormat {
+	EXPORT_UNUSED                                    = 0x0,
+	EXPORT_32_R                                      = 0x1,
+	EXPORT_32_GR                                     = 0x2,
+	EXPORT_32_AR                                     = 0x3,
+	EXPORT_FP16_ABGR                                 = 0x4,
+	EXPORT_UNSIGNED16_ABGR                           = 0x5,
+	EXPORT_SIGNED16_ABGR                             = 0x6,
+	EXPORT_32_ABGR                                   = 0x7,
+} QuadExportFormat;
+typedef enum QuadExportFormatOld {
+	EXPORT_4P_32BPC_ABGR                             = 0x0,
+	EXPORT_4P_16BPC_ABGR                             = 0x1,
+	EXPORT_4P_32BPC_GR                               = 0x2,
+	EXPORT_4P_32BPC_AR                               = 0x3,
+	EXPORT_2P_32BPC_ABGR                             = 0x4,
+	EXPORT_8P_32BPC_R                                = 0x5,
+} QuadExportFormatOld;
+typedef enum ColorFormat {
+	COLOR_INVALID                                    = 0x0,
+	COLOR_8                                          = 0x1,
+	COLOR_16                                         = 0x2,
+	COLOR_8_8                                        = 0x3,
+	COLOR_32                                         = 0x4,
+	COLOR_16_16                                      = 0x5,
+	COLOR_10_11_11                                   = 0x6,
+	COLOR_11_11_10                                   = 0x7,
+	COLOR_10_10_10_2                                 = 0x8,
+	COLOR_2_10_10_10                                 = 0x9,
+	COLOR_8_8_8_8                                    = 0xa,
+	COLOR_32_32                                      = 0xb,
+	COLOR_16_16_16_16                                = 0xc,
+	COLOR_RESERVED_13                                = 0xd,
+	COLOR_32_32_32_32                                = 0xe,
+	COLOR_RESERVED_15                                = 0xf,
+	COLOR_5_6_5                                      = 0x10,
+	COLOR_1_5_5_5                                    = 0x11,
+	COLOR_5_5_5_1                                    = 0x12,
+	COLOR_4_4_4_4                                    = 0x13,
+	COLOR_8_24                                       = 0x14,
+	COLOR_24_8                                       = 0x15,
+	COLOR_X24_8_32_FLOAT                             = 0x16,
+	COLOR_RESERVED_23                                = 0x17,
+} ColorFormat;
+typedef enum SurfaceFormat {
+	FMT_INVALID                                      = 0x0,
+	FMT_8                                            = 0x1,
+	FMT_16                                           = 0x2,
+	FMT_8_8                                          = 0x3,
+	FMT_32                                           = 0x4,
+	FMT_16_16                                        = 0x5,
+	FMT_10_11_11                                     = 0x6,
+	FMT_11_11_10                                     = 0x7,
+	FMT_10_10_10_2                                   = 0x8,
+	FMT_2_10_10_10                                   = 0x9,
+	FMT_8_8_8_8                                      = 0xa,
+	FMT_32_32                                        = 0xb,
+	FMT_16_16_16_16                                  = 0xc,
+	FMT_32_32_32                                     = 0xd,
+	FMT_32_32_32_32                                  = 0xe,
+	FMT_RESERVED_4                                   = 0xf,
+	FMT_5_6_5                                        = 0x10,
+	FMT_1_5_5_5                                      = 0x11,
+	FMT_5_5_5_1                                      = 0x12,
+	FMT_4_4_4_4                                      = 0x13,
+	FMT_8_24                                         = 0x14,
+	FMT_24_8                                         = 0x15,
+	FMT_X24_8_32_FLOAT                               = 0x16,
+	FMT_RESERVED_33                                  = 0x17,
+	FMT_11_11_10_FLOAT                               = 0x18,
+	FMT_16_FLOAT                                     = 0x19,
+	FMT_32_FLOAT                                     = 0x1a,
+	FMT_16_16_FLOAT                                  = 0x1b,
+	FMT_8_24_FLOAT                                   = 0x1c,
+	FMT_24_8_FLOAT                                   = 0x1d,
+	FMT_32_32_FLOAT                                  = 0x1e,
+	FMT_10_11_11_FLOAT                               = 0x1f,
+	FMT_16_16_16_16_FLOAT                            = 0x20,
+	FMT_3_3_2                                        = 0x21,
+	FMT_6_5_5                                        = 0x22,
+	FMT_32_32_32_32_FLOAT                            = 0x23,
+	FMT_RESERVED_36                                  = 0x24,
+	FMT_1                                            = 0x25,
+	FMT_1_REVERSED                                   = 0x26,
+	FMT_GB_GR                                        = 0x27,
+	FMT_BG_RG                                        = 0x28,
+	FMT_32_AS_8                                      = 0x29,
+	FMT_32_AS_8_8                                    = 0x2a,
+	FMT_5_9_9_9_SHAREDEXP                            = 0x2b,
+	FMT_8_8_8                                        = 0x2c,
+	FMT_16_16_16                                     = 0x2d,
+	FMT_16_16_16_FLOAT                               = 0x2e,
+	FMT_4_4                                          = 0x2f,
+	FMT_32_32_32_FLOAT                               = 0x30,
+	FMT_BC1                                          = 0x31,
+	FMT_BC2                                          = 0x32,
+	FMT_BC3                                          = 0x33,
+	FMT_BC4                                          = 0x34,
+	FMT_BC5                                          = 0x35,
+	FMT_BC6                                          = 0x36,
+	FMT_BC7                                          = 0x37,
+	FMT_32_AS_32_32_32_32                            = 0x38,
+	FMT_APC3                                         = 0x39,
+	FMT_APC4                                         = 0x3a,
+	FMT_APC5                                         = 0x3b,
+	FMT_APC6                                         = 0x3c,
+	FMT_APC7                                         = 0x3d,
+	FMT_CTX1                                         = 0x3e,
+	FMT_RESERVED_63                                  = 0x3f,
+} SurfaceFormat;
+typedef enum BUF_DATA_FORMAT {
+	BUF_DATA_FORMAT_INVALID                          = 0x0,
+	BUF_DATA_FORMAT_8                                = 0x1,
+	BUF_DATA_FORMAT_16                               = 0x2,
+	BUF_DATA_FORMAT_8_8                              = 0x3,
+	BUF_DATA_FORMAT_32                               = 0x4,
+	BUF_DATA_FORMAT_16_16                            = 0x5,
+	BUF_DATA_FORMAT_10_11_11                         = 0x6,
+	BUF_DATA_FORMAT_11_11_10                         = 0x7,
+	BUF_DATA_FORMAT_10_10_10_2                       = 0x8,
+	BUF_DATA_FORMAT_2_10_10_10                       = 0x9,
+	BUF_DATA_FORMAT_8_8_8_8                          = 0xa,
+	BUF_DATA_FORMAT_32_32                            = 0xb,
+	BUF_DATA_FORMAT_16_16_16_16                      = 0xc,
+	BUF_DATA_FORMAT_32_32_32                         = 0xd,
+	BUF_DATA_FORMAT_32_32_32_32                      = 0xe,
+	BUF_DATA_FORMAT_RESERVED_15                      = 0xf,
+} BUF_DATA_FORMAT;
+typedef enum IMG_DATA_FORMAT {
+	IMG_DATA_FORMAT_INVALID                          = 0x0,
+	IMG_DATA_FORMAT_8                                = 0x1,
+	IMG_DATA_FORMAT_16                               = 0x2,
+	IMG_DATA_FORMAT_8_8                              = 0x3,
+	IMG_DATA_FORMAT_32                               = 0x4,
+	IMG_DATA_FORMAT_16_16                            = 0x5,
+	IMG_DATA_FORMAT_10_11_11                         = 0x6,
+	IMG_DATA_FORMAT_11_11_10                         = 0x7,
+	IMG_DATA_FORMAT_10_10_10_2                       = 0x8,
+	IMG_DATA_FORMAT_2_10_10_10                       = 0x9,
+	IMG_DATA_FORMAT_8_8_8_8                          = 0xa,
+	IMG_DATA_FORMAT_32_32                            = 0xb,
+	IMG_DATA_FORMAT_16_16_16_16                      = 0xc,
+	IMG_DATA_FORMAT_32_32_32                         = 0xd,
+	IMG_DATA_FORMAT_32_32_32_32                      = 0xe,
+	IMG_DATA_FORMAT_RESERVED_15                      = 0xf,
+	IMG_DATA_FORMAT_5_6_5                            = 0x10,
+	IMG_DATA_FORMAT_1_5_5_5                          = 0x11,
+	IMG_DATA_FORMAT_5_5_5_1                          = 0x12,
+	IMG_DATA_FORMAT_4_4_4_4                          = 0x13,
+	IMG_DATA_FORMAT_8_24                             = 0x14,
+	IMG_DATA_FORMAT_24_8                             = 0x15,
+	IMG_DATA_FORMAT_X24_8_32                         = 0x16,
+	IMG_DATA_FORMAT_RESERVED_23                      = 0x17,
+	IMG_DATA_FORMAT_RESERVED_24                      = 0x18,
+	IMG_DATA_FORMAT_RESERVED_25                      = 0x19,
+	IMG_DATA_FORMAT_RESERVED_26                      = 0x1a,
+	IMG_DATA_FORMAT_RESERVED_27                      = 0x1b,
+	IMG_DATA_FORMAT_RESERVED_28                      = 0x1c,
+	IMG_DATA_FORMAT_RESERVED_29                      = 0x1d,
+	IMG_DATA_FORMAT_RESERVED_30                      = 0x1e,
+	IMG_DATA_FORMAT_RESERVED_31                      = 0x1f,
+	IMG_DATA_FORMAT_GB_GR                            = 0x20,
+	IMG_DATA_FORMAT_BG_RG                            = 0x21,
+	IMG_DATA_FORMAT_5_9_9_9                          = 0x22,
+	IMG_DATA_FORMAT_BC1                              = 0x23,
+	IMG_DATA_FORMAT_BC2                              = 0x24,
+	IMG_DATA_FORMAT_BC3                              = 0x25,
+	IMG_DATA_FORMAT_BC4                              = 0x26,
+	IMG_DATA_FORMAT_BC5                              = 0x27,
+	IMG_DATA_FORMAT_BC6                              = 0x28,
+	IMG_DATA_FORMAT_BC7                              = 0x29,
+	IMG_DATA_FORMAT_RESERVED_42                      = 0x2a,
+	IMG_DATA_FORMAT_RESERVED_43                      = 0x2b,
+	IMG_DATA_FORMAT_FMASK8_S2_F1                     = 0x2c,
+	IMG_DATA_FORMAT_FMASK8_S4_F1                     = 0x2d,
+	IMG_DATA_FORMAT_FMASK8_S8_F1                     = 0x2e,
+	IMG_DATA_FORMAT_FMASK8_S2_F2                     = 0x2f,
+	IMG_DATA_FORMAT_FMASK8_S4_F2                     = 0x30,
+	IMG_DATA_FORMAT_FMASK8_S4_F4                     = 0x31,
+	IMG_DATA_FORMAT_FMASK16_S16_F1                   = 0x32,
+	IMG_DATA_FORMAT_FMASK16_S8_F2                    = 0x33,
+	IMG_DATA_FORMAT_FMASK32_S16_F2                   = 0x34,
+	IMG_DATA_FORMAT_FMASK32_S8_F4                    = 0x35,
+	IMG_DATA_FORMAT_FMASK32_S8_F8                    = 0x36,
+	IMG_DATA_FORMAT_FMASK64_S16_F4                   = 0x37,
+	IMG_DATA_FORMAT_FMASK64_S16_F8                   = 0x38,
+	IMG_DATA_FORMAT_4_4                              = 0x39,
+	IMG_DATA_FORMAT_6_5_5                            = 0x3a,
+	IMG_DATA_FORMAT_1                                = 0x3b,
+	IMG_DATA_FORMAT_1_REVERSED                       = 0x3c,
+	IMG_DATA_FORMAT_32_AS_8                          = 0x3d,
+	IMG_DATA_FORMAT_32_AS_8_8                        = 0x3e,
+	IMG_DATA_FORMAT_32_AS_32_32_32_32                = 0x3f,
+} IMG_DATA_FORMAT;
+typedef enum BUF_NUM_FORMAT {
+	BUF_NUM_FORMAT_UNORM                             = 0x0,
+	BUF_NUM_FORMAT_SNORM                             = 0x1,
+	BUF_NUM_FORMAT_USCALED                           = 0x2,
+	BUF_NUM_FORMAT_SSCALED                           = 0x3,
+	BUF_NUM_FORMAT_UINT                              = 0x4,
+	BUF_NUM_FORMAT_SINT                              = 0x5,
+	BUF_NUM_FORMAT_SNORM_OGL                         = 0x6,
+	BUF_NUM_FORMAT_FLOAT                             = 0x7,
+} BUF_NUM_FORMAT;
+typedef enum IMG_NUM_FORMAT {
+	IMG_NUM_FORMAT_UNORM                             = 0x0,
+	IMG_NUM_FORMAT_SNORM                             = 0x1,
+	IMG_NUM_FORMAT_USCALED                           = 0x2,
+	IMG_NUM_FORMAT_SSCALED                           = 0x3,
+	IMG_NUM_FORMAT_UINT                              = 0x4,
+	IMG_NUM_FORMAT_SINT                              = 0x5,
+	IMG_NUM_FORMAT_SNORM_OGL                         = 0x6,
+	IMG_NUM_FORMAT_FLOAT                             = 0x7,
+	IMG_NUM_FORMAT_RESERVED_8                        = 0x8,
+	IMG_NUM_FORMAT_SRGB                              = 0x9,
+	IMG_NUM_FORMAT_UBNORM                            = 0xa,
+	IMG_NUM_FORMAT_UBNORM_OGL                        = 0xb,
+	IMG_NUM_FORMAT_UBINT                             = 0xc,
+	IMG_NUM_FORMAT_UBSCALED                          = 0xd,
+	IMG_NUM_FORMAT_RESERVED_14                       = 0xe,
+	IMG_NUM_FORMAT_RESERVED_15                       = 0xf,
+} IMG_NUM_FORMAT;
+typedef enum TileType {
+	ARRAY_COLOR_TILE                                 = 0x0,
+	ARRAY_DEPTH_TILE                                 = 0x1,
+} TileType;
+typedef enum NonDispTilingOrder {
+	ADDR_SURF_MICRO_TILING_DISPLAY                   = 0x0,
+	ADDR_SURF_MICRO_TILING_NON_DISPLAY               = 0x1,
+} NonDispTilingOrder;
+typedef enum MicroTileMode {
+	ADDR_SURF_DISPLAY_MICRO_TILING                   = 0x0,
+	ADDR_SURF_THIN_MICRO_TILING                      = 0x1,
+	ADDR_SURF_DEPTH_MICRO_TILING                     = 0x2,
+	ADDR_SURF_ROTATED_MICRO_TILING                   = 0x3,
+	ADDR_SURF_THICK_MICRO_TILING                     = 0x4,
+} MicroTileMode;
+typedef enum TileSplit {
+	ADDR_SURF_TILE_SPLIT_64B                         = 0x0,
+	ADDR_SURF_TILE_SPLIT_128B                        = 0x1,
+	ADDR_SURF_TILE_SPLIT_256B                        = 0x2,
+	ADDR_SURF_TILE_SPLIT_512B                        = 0x3,
+	ADDR_SURF_TILE_SPLIT_1KB                         = 0x4,
+	ADDR_SURF_TILE_SPLIT_2KB                         = 0x5,
+	ADDR_SURF_TILE_SPLIT_4KB                         = 0x6,
+} TileSplit;
+typedef enum SampleSplit {
+	ADDR_SURF_SAMPLE_SPLIT_1                         = 0x0,
+	ADDR_SURF_SAMPLE_SPLIT_2                         = 0x1,
+	ADDR_SURF_SAMPLE_SPLIT_4                         = 0x2,
+	ADDR_SURF_SAMPLE_SPLIT_8                         = 0x3,
+} SampleSplit;
+typedef enum PipeConfig {
+	ADDR_SURF_P2                                     = 0x0,
+	ADDR_SURF_P2_RESERVED0                           = 0x1,
+	ADDR_SURF_P2_RESERVED1                           = 0x2,
+	ADDR_SURF_P2_RESERVED2                           = 0x3,
+	ADDR_SURF_P4_8x16                                = 0x4,
+	ADDR_SURF_P4_16x16                               = 0x5,
+	ADDR_SURF_P4_16x32                               = 0x6,
+	ADDR_SURF_P4_32x32                               = 0x7,
+	ADDR_SURF_P8_16x16_8x16                          = 0x8,
+	ADDR_SURF_P8_16x32_8x16                          = 0x9,
+	ADDR_SURF_P8_32x32_8x16                          = 0xa,
+	ADDR_SURF_P8_16x32_16x16                         = 0xb,
+	ADDR_SURF_P8_32x32_16x16                         = 0xc,
+	ADDR_SURF_P8_32x32_16x32                         = 0xd,
+	ADDR_SURF_P8_32x64_32x32                         = 0xe,
+} PipeConfig;
+typedef enum NumBanks {
+	ADDR_SURF_2_BANK                                 = 0x0,
+	ADDR_SURF_4_BANK                                 = 0x1,
+	ADDR_SURF_8_BANK                                 = 0x2,
+	ADDR_SURF_16_BANK                                = 0x3,
+} NumBanks;
+typedef enum BankWidth {
+	ADDR_SURF_BANK_WIDTH_1                           = 0x0,
+	ADDR_SURF_BANK_WIDTH_2                           = 0x1,
+	ADDR_SURF_BANK_WIDTH_4                           = 0x2,
+	ADDR_SURF_BANK_WIDTH_8                           = 0x3,
+} BankWidth;
+typedef enum BankHeight {
+	ADDR_SURF_BANK_HEIGHT_1                          = 0x0,
+	ADDR_SURF_BANK_HEIGHT_2                          = 0x1,
+	ADDR_SURF_BANK_HEIGHT_4                          = 0x2,
+	ADDR_SURF_BANK_HEIGHT_8                          = 0x3,
+} BankHeight;
+typedef enum BankWidthHeight {
+	ADDR_SURF_BANK_WH_1                              = 0x0,
+	ADDR_SURF_BANK_WH_2                              = 0x1,
+	ADDR_SURF_BANK_WH_4                              = 0x2,
+	ADDR_SURF_BANK_WH_8                              = 0x3,
+} BankWidthHeight;
+typedef enum MacroTileAspect {
+	ADDR_SURF_MACRO_ASPECT_1                         = 0x0,
+	ADDR_SURF_MACRO_ASPECT_2                         = 0x1,
+	ADDR_SURF_MACRO_ASPECT_4                         = 0x2,
+	ADDR_SURF_MACRO_ASPECT_8                         = 0x3,
+} MacroTileAspect;
+typedef enum TCC_CACHE_POLICIES {
+	TCC_CACHE_POLICY_LRU                             = 0x0,
+	TCC_CACHE_POLICY_STREAM                          = 0x1,
+	TCC_CACHE_POLICY_BYPASS                          = 0x2,
+} TCC_CACHE_POLICIES;
+typedef enum PERFMON_COUNTER_MODE {
+	PERFMON_COUNTER_MODE_ACCUM                       = 0x0,
+	PERFMON_COUNTER_MODE_ACTIVE_CYCLES               = 0x1,
+	PERFMON_COUNTER_MODE_MAX                         = 0x2,
+	PERFMON_COUNTER_MODE_DIRTY                       = 0x3,
+	PERFMON_COUNTER_MODE_SAMPLE                      = 0x4,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT    = 0x5,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT     = 0x6,
+	PERFMON_COUNTER_MODE_CYCLES_GE_HI                = 0x7,
+	PERFMON_COUNTER_MODE_CYCLES_EQ_HI                = 0x8,
+	PERFMON_COUNTER_MODE_INACTIVE_CYCLES             = 0x9,
+	PERFMON_COUNTER_MODE_RESERVED                    = 0xf,
+} PERFMON_COUNTER_MODE;
+typedef enum PERFMON_SPM_MODE {
+	PERFMON_SPM_MODE_OFF                             = 0x0,
+	PERFMON_SPM_MODE_16BIT_CLAMP                     = 0x1,
+	PERFMON_SPM_MODE_16BIT_NO_CLAMP                  = 0x2,
+	PERFMON_SPM_MODE_32BIT_CLAMP                     = 0x3,
+	PERFMON_SPM_MODE_32BIT_NO_CLAMP                  = 0x4,
+	PERFMON_SPM_MODE_RESERVED_5                      = 0x5,
+	PERFMON_SPM_MODE_RESERVED_6                      = 0x6,
+	PERFMON_SPM_MODE_RESERVED_7                      = 0x7,
+	PERFMON_SPM_MODE_TEST_MODE_0                     = 0x8,
+	PERFMON_SPM_MODE_TEST_MODE_1                     = 0x9,
+	PERFMON_SPM_MODE_TEST_MODE_2                     = 0xa,
+} PERFMON_SPM_MODE;
+typedef enum SurfaceTiling {
+	ARRAY_LINEAR                                     = 0x0,
+	ARRAY_TILED                                      = 0x1,
+} SurfaceTiling;
+typedef enum SurfaceArray {
+	ARRAY_1D                                         = 0x0,
+	ARRAY_2D                                         = 0x1,
+	ARRAY_3D                                         = 0x2,
+	ARRAY_3D_SLICE                                   = 0x3,
+} SurfaceArray;
+typedef enum ColorArray {
+	ARRAY_2D_ALT_COLOR                               = 0x0,
+	ARRAY_2D_COLOR                                   = 0x1,
+	ARRAY_3D_SLICE_COLOR                             = 0x3,
+} ColorArray;
+typedef enum DepthArray {
+	ARRAY_2D_ALT_DEPTH                               = 0x0,
+	ARRAY_2D_DEPTH                                   = 0x1,
+} DepthArray;
+
+#endif /* DCE_8_0_ENUM_H */
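
The ColorFormat/SurfaceFormat additions above encode each channel layout in the name (COLOR_8_8_8_8 is four 8-bit channels, COLOR_16_16 is two 16-bit channels). A minimal standalone sketch, not part of the patch, of how a helper might derive bits per pixel from a handful of these values; the helper and its table are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Values mirror the dce_8_0_enum.h additions; the lookup itself is
 * an invented example, not part of the patch. */
enum { COLOR_8 = 0x1, COLOR_16 = 0x2, COLOR_8_8 = 0x3,
       COLOR_8_8_8_8 = 0xa, COLOR_16_16_16_16 = 0xc };

static unsigned color_format_bpp(unsigned fmt)
{
	switch (fmt) {
	case COLOR_8:           return 8;   /* one 8-bit channel   */
	case COLOR_16:          return 16;  /* one 16-bit channel  */
	case COLOR_8_8:         return 16;  /* two 8-bit channels  */
	case COLOR_8_8_8_8:     return 32;  /* e.g. RGBA8888       */
	case COLOR_16_16_16_16: return 64;  /* e.g. RGBA16         */
	default:                return 0;   /* not handled here    */
	}
}

int main(void)
{
	printf("COLOR_8_8_8_8 -> %u bpp\n", color_format_bpp(COLOR_8_8_8_8));
	return 0;
}
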
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
index 8a29307..c331c9f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
@@ -4130,6 +4130,18 @@
 #define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0xe
 #define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x10000
 #define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x10
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK_MASK 0x1
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK__SHIFT 0x0
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS_MASK 0x2
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS__SHIFT 0x1
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV_MASK 0x4
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV__SHIFT 0x2
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK_MASK 0x10
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK__SHIFT 0x4
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK 0x20
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT 0x5
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV_MASK 0x40
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV__SHIFT 0x6
 #define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x1
 #define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x0
 #define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x2
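
The new DC_GPIO_I2CPAD_MASK definitions follow the file's usual pattern of a _MASK/__SHIFT pair per register field. A small standalone sketch, assuming plain read-modify-write register access, of how such pairs are typically combined; only the two macros are taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK   0x20
#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT 0x5

/* Extract a field: mask off the other bits, then shift down. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear its bits, then OR in the shifted value. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK,
			DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT, 1);
	printf("reg=0x%x sda_pd_dis=%u\n", (unsigned)reg,
	       (unsigned)get_field(reg,
			DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK,
			DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT));
	return 0;
}
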
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
index 9d4347d..dfe7879 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
@@ -6225,6 +6225,12 @@
 	TCC_CACHE_POLICY_STREAM                          = 0x1,
 	TCC_CACHE_POLICY_BYPASS                          = 0x2,
 } TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+	MTYPE_NC_NV                                      = 0x0,
+	MTYPE_NC                                         = 0x1,
+	MTYPE_CC                                         = 0x2,
+	MTYPE_UC                                         = 0x3,
+} MTYPE;
 typedef enum PERFMON_COUNTER_MODE {
 	PERFMON_COUNTER_MODE_ACCUM                       = 0x0,
 	PERFMON_COUNTER_MODE_ACTIVE_CYCLES               = 0x1,
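
The new MTYPE enum names the GPU memory types (noncoherent non-volatile, noncoherent, cache-coherent, uncached). A hedged sketch of encoding such a value into a page-table-entry flags word; the 2-bit field position used here is an assumption for illustration, only the enum values come from the patch:

#include <stdint.h>
#include <stdio.h>

enum MTYPE { MTYPE_NC_NV = 0x0, MTYPE_NC = 0x1, MTYPE_CC = 0x2, MTYPE_UC = 0x3 };

/* Hypothetical 2-bit MTYPE field at bit 57 of a 64-bit PTE; the bit
 * position is an assumption for illustration, not from the patch. */
#define PTE_MTYPE_SHIFT 57
#define PTE_MTYPE_MASK  (0x3ULL << PTE_MTYPE_SHIFT)

static uint64_t pte_set_mtype(uint64_t pte, enum MTYPE mtype)
{
	return (pte & ~PTE_MTYPE_MASK) | ((uint64_t)mtype << PTE_MTYPE_SHIFT);
}

int main(void)
{
	uint64_t pte = pte_set_mtype(0, MTYPE_UC);	/* mark a page uncached */

	printf("pte=0x%016llx\n", (unsigned long long)pte);
	return 0;
}
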
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
new file mode 100644
index 0000000..d21c6b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
@@ -0,0 +1,102 @@
+/*
+ * Volcanic Islands IV SRC Register documentation
+ *
+ * Copyright (C) 2015  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _IVSRCID_VISLANDS30_H_
+#define _IVSRCID_VISLANDS30_H_
+
+
+// IV Source IDs
+
+#define VISLANDS30_IV_SRCID_D1_V_UPDATE_INT		            7	    // 0x07	
+#define VISLANDS30_IV_EXTID_D1_V_UPDATE_INT                  0
+
+#define VISLANDS30_IV_SRCID_D1_GRPH_PFLIP		            8	    // 0x08	
+#define VISLANDS30_IV_EXTID_D1_GRPH_PFLIP                    0
+
+#define VISLANDS30_IV_SRCID_D2_V_UPDATE_INT		            9	    // 0x09	
+#define VISLANDS30_IV_EXTID_D2_V_UPDATE_INT                  0
+
+#define VISLANDS30_IV_SRCID_D2_GRPH_PFLIP  		            10	    // 0x0a	
+#define VISLANDS30_IV_EXTID_D2_GRPH_PFLIP                    0
+
+#define VISLANDS30_IV_SRCID_D3_V_UPDATE_INT		            11	    // 0x0b	
+#define VISLANDS30_IV_EXTID_D3_V_UPDATE_INT                  0
+
+#define VISLANDS30_IV_SRCID_D3_GRPH_PFLIP		            12	    // 0x0c	
+#define VISLANDS30_IV_EXTID_D3_GRPH_PFLIP                    0
+
+#define VISLANDS30_IV_SRCID_D4_V_UPDATE_INT		            13	    // 0x0d  	
+#define VISLANDS30_IV_EXTID_D4_V_UPDATE_INT                  0
+
+#define VISLANDS30_IV_SRCID_D4_GRPH_PFLIP		            14	    // 0x0e  	
+#define VISLANDS30_IV_EXTID_D4_GRPH_PFLIP                    0
+
+#define VISLANDS30_IV_SRCID_D5_V_UPDATE_INT		            15	    // 0x0f	
+#define VISLANDS30_IV_EXTID_D5_V_UPDATE_INT                  0
+
+#define VISLANDS30_IV_SRCID_D5_GRPH_PFLIP		            16	    // 0x10  	
+#define VISLANDS30_IV_EXTID_D5_GRPH_PFLIP                    0
+
+#define VISLANDS30_IV_SRCID_D6_V_UPDATE_INT		            17	    // 0x11      	
+#define VISLANDS30_IV_EXTID_D6_V_UPDATE_INT                  0
+
+#define VISLANDS30_IV_SRCID_D6_GRPH_PFLIP		            18	    // 0x12  	
+#define VISLANDS30_IV_EXTID_D6_GRPH_PFLIP                    0
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A		            42	    // 0x2a	
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A                 0
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_B   		        42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B                 1
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_C   		        42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C                 2
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_D	    	        42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D                 3
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_E		            42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E                 4
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_F		            42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F                 5
+
+#define VISLANDS30_IV_SRCID_HPD_RX_A		                    42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HPD_RX_A                         6
+
+#define VISLANDS30_IV_SRCID_HPD_RX_B		                    42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HPD_RX_B                         7
+
+#define VISLANDS30_IV_SRCID_HPD_RX_C		                    42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HPD_RX_C                         8
+
+#define VISLANDS30_IV_SRCID_HPD_RX_D		                    42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HPD_RX_D                         9
+
+#define VISLANDS30_IV_SRCID_HPD_RX_E		                    42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HPD_RX_E                         10
+
+#define VISLANDS30_IV_SRCID_HPD_RX_F		                    42	    // 0x2a		
+#define VISLANDS30_IV_EXTID_HPD_RX_F                         11
+
+#endif // _IVSRCID_VISLANDS30_H_
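
All of the hotplug sources above share IV source ID 42 and are distinguished by the extended ID: 0 through 5 select the HPD connect/disconnect pins A-F, 6 through 11 the corresponding HPD RX (short pulse) interrupts. A standalone sketch of that demultiplexing; the handler itself is hypothetical:

#include <stdio.h>

#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A 42
#define VISLANDS30_IV_EXTID_HPD_RX_A          6

/* Hypothetical demux: all hotplug sources share src_id 42 and are
 * told apart by ext_id, as the table above encodes. */
static void handle_iv_entry(unsigned src_id, unsigned ext_id)
{
	if (src_id != VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A) {
		printf("src %u: not a hotplug interrupt\n", src_id);
		return;
	}
	if (ext_id < VISLANDS30_IV_EXTID_HPD_RX_A)
		printf("HPD connect/disconnect on pin %u\n", ext_id);
	else
		printf("HPD RX (short pulse) on pin %u\n",
		       ext_id - VISLANDS30_IV_EXTID_HPD_RX_A);
}

int main(void)
{
	handle_iv_entry(42, 0);	/* HOTPLUG_DETECT_A */
	handle_iv_entry(42, 7);	/* HPD_RX_B         */
	return 0;
}
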
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 888250b..a09d9f3 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -221,7 +221,7 @@
 	int (*resume)(struct kfd_dev *kfd);
 };
 
-bool kgd2kfd_init(unsigned interface_version,
+int kgd2kfd_init(unsigned interface_version,
 		const struct kgd2kfd_calls **g2f);
 
 #endif	/* KGD_KFD_INTERFACE_H_INCLUDED */
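
With kgd2kfd_init() returning int instead of bool, a caller can propagate a real errno (for example to defer probing) rather than collapsing every failure to false. A sketch with a stub standing in for the real function; the stub's behavior is invented for illustration:

#include <stdio.h>
#include <errno.h>

/* Stub standing in for the real kgd2kfd_init(); the real function
 * now returns 0 on success or a negative errno on failure. */
static int kgd2kfd_init(unsigned interface_version, const void **g2f)
{
	(void)g2f;
	return interface_version == 1 ? 0 : -ENOENT;
}

int main(void)
{
	const void *calls;
	int r = kgd2kfd_init(2, &calls);

	if (r)	/* errno-style: nonzero means failure, value says why */
		printf("kfd probe deferred/failed: %d\n", r);
	return 0;
}
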
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 589599f..9d22900 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -29,6 +29,7 @@
 #include "pp_instance.h"
 #include "power_state.h"
 #include "eventmanager.h"
+#include "pp_debug.h"
 
 #define PP_CHECK(handle)						\
 	do {								\
@@ -436,7 +437,10 @@
 	case PP_StateUILabel_Performance:
 		return POWER_STATE_TYPE_PERFORMANCE;
 	default:
-		return POWER_STATE_TYPE_DEFAULT;
+		if (state->classification.flags & PP_StateClassificationFlag_Boot)
+			return  POWER_STATE_TYPE_INTERNAL_BOOT;
+		else
+			return POWER_STATE_TYPE_DEFAULT;
 	}
 }
 
@@ -538,6 +542,112 @@
 	return hwmgr->hwmgr_func->get_temperature(hwmgr);
 }
 
+static int pp_dpm_get_pp_num_states(void *handle,
+		struct pp_states_info *data)
+{
+	struct pp_hwmgr *hwmgr;
+	int i;
+
+	if (!handle)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	if (hwmgr == NULL || hwmgr->ps == NULL)
+		return -EINVAL;
+
+	data->nums = hwmgr->num_ps;
+
+	for (i = 0; i < hwmgr->num_ps; i++) {
+		struct pp_power_state *state = (struct pp_power_state *)
+				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
+		switch (state->classification.ui_label) {
+		case PP_StateUILabel_Battery:
+			data->states[i] = POWER_STATE_TYPE_BATTERY;
+			break;
+		case PP_StateUILabel_Balanced:
+			data->states[i] = POWER_STATE_TYPE_BALANCED;
+			break;
+		case PP_StateUILabel_Performance:
+			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
+			break;
+		default:
+			if (state->classification.flags & PP_StateClassificationFlag_Boot)
+				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
+			else
+				data->states[i] = POWER_STATE_TYPE_DEFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int pp_dpm_get_pp_table(void *handle, char **table)
+{
+	struct pp_hwmgr *hwmgr;
+
+	if (!handle)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
+		hwmgr->hwmgr_func->get_pp_table == NULL)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
+}
+
+static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+{
+	struct pp_hwmgr *hwmgr;
+
+	if (!handle)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
+		hwmgr->hwmgr_func->set_pp_table == NULL)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
+}
+
+static int pp_dpm_force_clock_level(void *handle,
+		enum pp_clock_type type, int level)
+{
+	struct pp_hwmgr *hwmgr;
+
+	if (!handle)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
+			hwmgr->hwmgr_func->force_clock_level == NULL)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
+}
+
+static int pp_dpm_print_clock_levels(void *handle,
+		enum pp_clock_type type, char *buf)
+{
+	struct pp_hwmgr *hwmgr;
+
+	if (!handle)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
+			hwmgr->hwmgr_func->print_clock_levels == NULL)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
+}
+
 const struct amd_powerplay_funcs pp_dpm_funcs = {
 	.get_temperature = pp_dpm_get_temperature,
 	.load_firmware = pp_dpm_load_fw,
@@ -555,6 +665,11 @@
 	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
 	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
 	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
+	.get_pp_num_states = pp_dpm_get_pp_num_states,
+	.get_pp_table = pp_dpm_get_pp_table,
+	.set_pp_table = pp_dpm_set_pp_table,
+	.force_clock_level = pp_dpm_force_clock_level,
+	.print_clock_levels = pp_dpm_print_clock_levels,
 };
 
 static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -638,10 +753,10 @@
 
 /* export this function to DAL */
 
-int amd_powerplay_display_configuration_change(void *handle, const void *input)
+int amd_powerplay_display_configuration_change(void *handle,
+	const struct amd_pp_display_configuration *display_config)
 {
 	struct pp_hwmgr  *hwmgr;
-	const struct amd_pp_display_configuration *display_config = input;
 
 	PP_CHECK((struct pp_instance *)handle);
 
@@ -653,7 +768,7 @@
 }
 
 int amd_powerplay_get_display_power_level(void *handle,
-		struct amd_pp_dal_clock_info *output)
+		struct amd_pp_simple_clock_info *output)
 {
 	struct pp_hwmgr  *hwmgr;
 
@@ -666,3 +781,86 @@
 
 	return phm_get_dal_power_level(hwmgr, output);
 }
+
+int amd_powerplay_get_current_clocks(void *handle,
+		struct amd_pp_clock_info *clocks)
+{
+	struct pp_hwmgr  *hwmgr;
+	struct amd_pp_simple_clock_info simple_clocks;
+	struct pp_clock_info hw_clocks;
+
+	PP_CHECK((struct pp_instance *)handle);
+
+	if (clocks == NULL)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	phm_get_dal_power_level(hwmgr, &simple_clocks);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) {
+		if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment))
+			PP_ASSERT_WITH_CODE(0, "Error in PHM_GetPowerContainmentClockInfo", return -1);
+	} else {
+		if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_Activity))
+			PP_ASSERT_WITH_CODE(0, "Error in PHM_GetClockInfo", return -1);
+	}
+
+	clocks->min_engine_clock = hw_clocks.min_eng_clk;
+	clocks->max_engine_clock = hw_clocks.max_eng_clk;
+	clocks->min_memory_clock = hw_clocks.min_mem_clk;
+	clocks->max_memory_clock = hw_clocks.max_mem_clk;
+	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
+	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
+
+	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+
+	clocks->max_clocks_state = simple_clocks.level;
+
+	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
+		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+	}
+
+	return 0;
+}
+
+int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
+{
+	int result = -1;
+	struct pp_hwmgr *hwmgr;
+
+	PP_CHECK((struct pp_instance *)handle);
+
+	if (clocks == NULL)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	result = phm_get_clock_by_type(hwmgr, type, clocks);
+
+	return result;
+}
+
+int amd_powerplay_get_display_mode_validation_clocks(void *handle,
+		struct amd_pp_simple_clock_info *clocks)
+{
+	int result = -1;
+	struct pp_hwmgr  *hwmgr;
+
+	PP_CHECK((struct pp_instance *)handle);
+
+	if (clocks == NULL)
+		return -EINVAL;
+
+	hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
+		result = phm_get_max_high_clocks(hwmgr, clocks);
+
+	return result;
+}
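
The newly exported queries let display code ask powerplay for clock envelopes, e.g. amd_powerplay_get_display_mode_validation_clocks() when validating a mode. A standalone sketch of such a caller, with a stub in place of the real exported function and invented clock values (reported, like the driver's tables, in 10 kHz units):

#include <stdio.h>

/* Simplified stand-in for struct amd_pp_simple_clock_info and the
 * exported query; the real call goes through PP_CHECK and hwmgr. */
struct simple_clock_info { unsigned level, engine_max_clock, memory_max_clock; };

static int get_display_mode_validation_clocks(struct simple_clock_info *clocks)
{
	clocks->engine_max_clock = 80000;	/* 800 MHz in 10 kHz units */
	clocks->memory_max_clock = 100000;	/* 1000 MHz               */
	return 0;
}

/* A display mode is acceptable if its required engine clock fits
 * under the reported maximum. */
static int validate_mode(unsigned required_sclk)
{
	struct simple_clock_info clocks = { 0 };

	if (get_display_mode_validation_clocks(&clocks))
		return 0;	/* query unsupported: don't reject the mode */
	return required_sclk <= clocks.engine_max_clock ? 0 : -1;
}

int main(void)
{
	printf("mode ok: %d\n", validate_mode(60000) == 0);
	return 0;
}
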
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index cf01177..5682490 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -241,6 +241,11 @@
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 					PHM_PlatformCaps_DynamicUVDState);
 
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_UVDDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_VCEDPM);
+
 	cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
 	cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
 	cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
@@ -733,7 +738,6 @@
 	unsigned long clock = 0;
 	unsigned long level;
 	unsigned long stable_pstate_sclk;
-	struct PP_Clocks clocks;
 	unsigned long percentage;
 
 	cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
@@ -744,8 +748,10 @@
 	else
 		cz_hwmgr->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;
 
-	/*PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks);*/
-	clock = clocks.engineClock;
+	clock = hwmgr->display_config.min_core_set_clock;
+	if (clock == 0)
+		printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
 
 	if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
 		cz_hwmgr->sclk_dpm.hard_min_clk = clock;
@@ -901,9 +907,9 @@
 
 		if (pnew_state->action == FORCE_HIGH)
 			cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
-		else if(pnew_state->action == CANCEL_FORCE_HIGH)
-			cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
-		else 
+		else if (pnew_state->action == CANCEL_FORCE_HIGH)
+			cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
+		else
 			cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
 	}
 	return 0;
@@ -1128,9 +1134,10 @@
 				cast_const_PhwCzPowerState(&pcurrent_ps->hardware);
 
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-	struct PP_Clocks clocks;
+	struct PP_Clocks clocks = {0, 0, 0, 0};
 	bool force_high;
-	unsigned long  num_of_active_displays = 4;
+	uint32_t  num_of_active_displays = 0;
+	struct cgs_display_info info = {0};
 
 	cz_ps->evclk = hwmgr->vce_arbiter.evclk;
 	cz_ps->ecclk = hwmgr->vce_arbiter.ecclk;
@@ -1142,12 +1149,15 @@
 
 	cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
 
-	/* to do PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */
-	/* PECI_GetNumberOfActiveDisplays(pHwMgr->pPECI, &numOfActiveDisplays); */
+	clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
+				hwmgr->display_config.min_mem_set_clock :
+				cz_hwmgr->sys_info.nbp_memory_clock[1];
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+	num_of_active_displays = info.display_count;
+
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
 		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
-	else
-		clocks.memoryClock = 0;
 
 	if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
 		clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
@@ -1217,6 +1227,7 @@
 		printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n");
 		return result;
 	}
+	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
 
 	result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
 	if (result != 0) {
@@ -1648,10 +1659,10 @@
 			& PWRMGT_SEPARATION_TIME_MASK)
 			<< PWRMGT_SEPARATION_TIME_SHIFT;
 
-		data|= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
+		data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
 			<< PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
 
-		data|= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
+		data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
 			<< PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
 
 		PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
@@ -1666,9 +1677,9 @@
 }
 
 
- static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
+static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
 			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
- {
+{
 	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
 
 	if (separation_time !=
@@ -1696,20 +1707,19 @@
 	return 0;
 }
 
- static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
-		struct amd_pp_dal_clock_info*info)
+static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
+		struct amd_pp_simple_clock_info *info)
 {
 	uint32_t i;
-	const struct phm_clock_voltage_dependency_table * table =
+	const struct phm_clock_voltage_dependency_table *table =
 			hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
-	const struct phm_clock_and_voltage_limits* limits =
+	const struct phm_clock_and_voltage_limits *limits =
 			&hwmgr->dyn_state.max_clock_voltage_on_ac;
 
 	info->engine_max_clock = limits->sclk;
 	info->memory_max_clock = limits->mclk;
 
 	for (i = table->count - 1; i > 0; i--) {
-
 		if (limits->vddc >= table->entries[i].v) {
 			info->level = table->entries[i].clk;
 			return 0;
@@ -1718,6 +1728,158 @@
 	return -EINVAL;
 }
 
+static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, int level)
+{
+	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+		return -EINVAL;
+
+	switch (type) {
+	case PP_SCLK:
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetSclkSoftMin,
+				(1 << level));
+		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+				PPSMC_MSG_SetSclkSoftMax,
+				(1 << level));
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, char *buf)
+{
+	struct phm_clock_voltage_dependency_table *sclk_table =
+			hwmgr->dyn_state.vddc_dependency_on_sclk;
+	int i, now, size = 0;
+
+	switch (type) {
+	case PP_SCLK:
+		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
+				CGS_IND_REG__SMC,
+				ixTARGET_AND_CURRENT_PROFILE_INDEX),
+				TARGET_AND_CURRENT_PROFILE_INDEX,
+				CURR_SCLK_INDEX);
+
+		for (i = 0; i < sclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMHz %s\n",
+					i, sclk_table->entries[i].clk / 100,
+					(i == now) ? "*" : "");
+		break;
+	default:
+		break;
+	}
+	return size;
+}
+
+static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+				PHM_PerformanceLevelDesignation designation, uint32_t index,
+				PHM_PerformanceLevel *level)
+{
+	const struct cz_power_state *ps;
+	struct cz_hwmgr *data;
+	uint32_t level_index;
+	uint32_t i;
+
+	if (level == NULL || hwmgr == NULL || state == NULL)
+		return -EINVAL;
+
+	data = (struct cz_hwmgr *)(hwmgr->backend);
+	ps = cast_const_PhwCzPowerState(state);
+
+	level_index = index > ps->level - 1 ? ps->level - 1 : index;
+
+	level->coreClock  = ps->levels[level_index].engineClock;
+
+	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
+		for (i = 1; i < ps->level; i++) {
+			if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
+				level->coreClock = ps->levels[i].engineClock;
+				break;
+			}
+		}
+	}
+
+	if (level_index == 0)
+		level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1];
+	else
+		level->memory_clock = data->sys_info.nbp_memory_clock[0];
+
+	level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
+	level->nonLocalMemoryFreq = 0;
+	level->nonLocalMemoryWidth = 0;
+
+	return 0;
+}
+
+static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
+	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
+{
+	const struct cz_power_state *ps = cast_const_PhwCzPowerState(state);
+
+	clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
+	clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
+
+	return 0;
+}
+
+static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+						struct amd_pp_clocks *clocks)
+{
+	struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
+	int i;
+	struct phm_clock_voltage_dependency_table *table;
+
+	clocks->count = cz_get_max_sclk_level(hwmgr);
+	switch (type) {
+	case amd_pp_disp_clock:
+		for (i = 0; i < clocks->count; i++)
+			clocks->clock[i] = data->sys_info.display_clock[i];
+		break;
+	case amd_pp_sys_clock:
+		table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+		for (i = 0; i < clocks->count; i++)
+			clocks->clock[i] = table->entries[i].clk;
+		break;
+	case amd_pp_mem_clock:
+		clocks->count = CZ_NUM_NBPMEMORYCLOCK;
+		for (i = 0; i < clocks->count; i++)
+			clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+{
+	struct phm_clock_voltage_dependency_table *table =
+					hwmgr->dyn_state.vddc_dependency_on_sclk;
+	unsigned long level;
+	const struct phm_clock_and_voltage_limits *limits =
+			&hwmgr->dyn_state.max_clock_voltage_on_ac;
+
+	if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
+		return -EINVAL;
+
+	level = cz_get_max_sclk_level(hwmgr) - 1;
+
+	if (level < table->count)
+		clocks->engine_max_clock = table->entries[level].clk;
+	else
+		clocks->engine_max_clock = table->entries[table->count - 1].clk;
+
+	clocks->memory_max_clock = limits->mclk;
+
+	return 0;
+}
+
 static const struct pp_hwmgr_func cz_hwmgr_funcs = {
 	.backend_init = cz_hwmgr_backend_init,
 	.backend_fini = cz_hwmgr_backend_fini,
@@ -1736,7 +1898,13 @@
 	.print_current_perforce_level = cz_print_current_perforce_level,
 	.set_cpu_power_state = cz_set_cpu_power_state,
 	.store_cc6_data = cz_store_cc6_data,
-	.get_dal_power_level= cz_get_dal_power_level,
+	.force_clock_level = cz_force_clock_level,
+	.print_clock_levels = cz_print_clock_levels,
+	.get_dal_power_level = cz_get_dal_power_level,
+	.get_performance_level = cz_get_performance_level,
+	.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
+	.get_clock_by_type = cz_get_clock_by_type,
+	.get_max_high_clocks = cz_get_max_high_clocks,
 };
 
 int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
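
cz_force_clock_level() programs both SetSclkSoftMin and SetSclkSoftMax with the single-bit mask (1 << level), pinning the SCLK DPM to exactly one level, and cz_print_clock_levels() lists each level as "index: frequency" with a star on the current one. A standalone sketch of both the mask and the output format, with an invented clock table:

#include <stdio.h>

/* Illustrative SCLK table in 10 kHz units (values invented); the
 * patch prints entries as clk / 100, i.e. in MHz. */
static const unsigned sclk[] = { 30000, 40000, 60000, 80000 };

int main(void)
{
	unsigned level = 2;		/* level chosen via force_clock_level */
	unsigned mask  = 1u << level;	/* payload of SetSclkSoftMin/SoftMax  */
	unsigned i, now = level;

	printf("soft min/max mask: 0x%x\n", mask);
	for (i = 0; i < sizeof(sclk) / sizeof(sclk[0]); i++)
		printf("%u: %uMHz %s\n", i, sclk[i] / 100,
		       i == now ? "*" : "");
	return 0;
}
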
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 28031a7..51dedf8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -4275,7 +4275,6 @@
 	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
 		dpm_table->mclk_table.dpm_levels
 			[dpm_table->mclk_table.count - 1].value = mclk;
-
 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_OD6PlusinACSupport) ||
 			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -4886,6 +4885,10 @@
 	activity_percent >>= 8;
 
 	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+
+	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
 }
 
 static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
@@ -5073,6 +5076,125 @@
 				CG_FDO_CTRL2, FDO_PWM_MODE);
 }
 
+static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+	*table = (char *)&data->smc_state_table;
+
+	return sizeof(struct SMU73_Discrete_DpmTable);
+}
+
+static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+	void *table = (void *)&data->smc_state_table;
+
+	memcpy(table, buf, size);
+
+	return 0;
+}
+
+static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, int level)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+		return -EINVAL;
+
+	switch (type) {
+	case PP_SCLK:
+		if (!data->sclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SCLKDPM_SetEnabledMask,
+					(1 << level));
+		break;
+	case PP_MCLK:
+		if (!data->mclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_MCLKDPM_SetEnabledMask,
+					(1 << level));
+		break;
+	case PP_PCIE:
+		if (!data->pcie_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_PCIeDPM_ForceLevel,
+					(1 << level));
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, char *buf)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+	struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+	int i, now, size = 0;
+	uint32_t clock, pcie_speed;
+
+	switch (type) {
+	case PP_SCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < sclk_table->count; i++) {
+			if (clock > sclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < sclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMHz %s\n",
+					i, sclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_MCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < mclk_table->count; i++) {
+			if (clock > mclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < mclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMHz %s\n",
+					i, mclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_PCIE:
+		pcie_speed = fiji_get_current_pcie_speed(hwmgr);
+		for (i = 0; i < pcie_table->count; i++) {
+			if (pcie_speed != pcie_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < pcie_table->count; i++)
+			size += sprintf(buf + size, "%d: %s %s\n", i,
+					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
+					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+					(i == now) ? "*" : "");
+		break;
+	default:
+		break;
+	}
+	return size;
+}
+
 static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
 	.backend_init = &fiji_hwmgr_backend_init,
 	.backend_fini = &tonga_hwmgr_backend_fini,
@@ -5108,6 +5230,10 @@
 	.register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
 	.set_fan_control_mode = fiji_set_fan_control_mode,
 	.get_fan_control_mode = fiji_get_fan_control_mode,
+	.get_pp_table = fiji_get_pp_table,
+	.set_pp_table = fiji_set_pp_table,
+	.force_clock_level = fiji_force_clock_level,
+	.print_clock_levels = fiji_print_clock_levels,
 };
 
 int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
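
fiji_get_pp_table() hands back a pointer to the live SMC state table plus its size, and fiji_set_pp_table() memcpys caller-supplied bytes over it without clamping, so the caller must not pass more than the reported size. A standalone round-trip sketch with a simplified table type standing in for SMU73_Discrete_DpmTable:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in: the real table is SMU73_Discrete_DpmTable. */
struct dpm_table { unsigned char bytes[32]; };
static struct dpm_table smc_state_table;

static int get_pp_table(char **table)
{
	*table = (char *)&smc_state_table;
	return sizeof(struct dpm_table);
}

static int set_pp_table(const char *buf, size_t size)
{
	/* Note: like the patch, no clamp here; the caller must not
	 * pass more than the size get_pp_table reported. */
	memcpy(&smc_state_table, buf, size);
	return 0;
}

int main(void)
{
	char *tbl, copy[32];
	int size = get_pp_table(&tbl);

	memcpy(copy, tbl, size);	/* read the table out     */
	copy[0] ^= 1;			/* tweak one byte          */
	set_pp_table(copy, size);	/* write the edit back     */
	printf("table size %d, byte0 now %u\n", size, smc_state_table.bytes[0]);
	return 0;
}
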
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
index 22e273b..a16f7cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -29,6 +29,7 @@
 #include "smu73_discrete.h"
 #include "ppatomctrl.h"
 #include "fiji_ppsmc.h"
+#include "pp_endian.h"
 
 #define FIJI_MAX_HARDWARE_POWERLEVELS	2
 #define FIJI_AT_DFLT	30
@@ -347,15 +348,4 @@
 int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
 int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
 
-#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
-#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
-
-#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
-#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
-
-#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
-#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
-
-#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
-
 #endif /* _FIJI_HWMGR_H_ */
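
The endianness helpers removed here move to the shared pp_endian.h; PP_HOST_TO_SMC_UL is cpu_to_be32 because the SMC consumes big-endian words, and the CONVERT_* variants swap in place. A portable stand-in, assuming a little-endian host, showing the in-place conversion:

#include <stdint.h>
#include <stdio.h>

/* Byte-swap stand-in for cpu_to_be32 on a little-endian host;
 * illustration only, the kernel provides the real macro. */
static uint32_t to_be32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x & 0xff00) << 8) | (x << 24);
}

#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = to_be32(X))

int main(void)
{
	uint32_t v = 0x12345678;

	CONVERT_FROM_HOST_TO_SMC_UL(v);	/* in place, as the macro does */
	printf("0x%08x\n", v);		/* 0x78563412 on little-endian */
	return 0;
}
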
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 9deadab..72cfecc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -34,6 +34,11 @@
 	int result = 0;
 	phm_table_function *function;
 
+	if (rt_table->function_list == NULL) {
+		printk(KERN_INFO "[ powerplay ] this function is not implemented!\n");
+		return 0;
+	}
+
 	for (function = rt_table->function_list; NULL != *function; function++) {
 		int tmp = (*function)(hwmgr, input, output, temp_storage, result);
 
@@ -57,9 +62,9 @@
 	int result = 0;
 	void *temp_storage = NULL;
 
-	if (hwmgr == NULL || rt_table == NULL || rt_table->function_list == NULL) {
+	if (hwmgr == NULL || rt_table == NULL) {
 		printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
-		return 0; /*temp return ture because some function not implement on some asic */
+		return -EINVAL;
 	}
 
 	if (0 != rt_table->storage_size) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 0f2d5e4..be31bed 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -26,7 +26,7 @@
 #include "power_state.h"
 #include "pp_acpi.h"
 #include "amd_acpi.h"
-#include "amd_powerplay.h"
+#include "pp_debug.h"
 
 #define PHM_FUNC_CHECK(hw) \
 	do {							\
@@ -313,13 +313,12 @@
 }
 
 int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
-		struct amd_pp_dal_clock_info *info)
+		struct amd_pp_simple_clock_info *info)
 {
 	PHM_FUNC_CHECK(hwmgr);
 
 	if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
 		return -EINVAL;
-
 	return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
 }
 
@@ -332,3 +331,91 @@
 
 	return 0;
 }
+
+
+				PHM_PerformanceLevelDesignation designation, uint32_t index,
+				PHM_PerformanceLevel *level)
+{
+	PHM_FUNC_CHECK(hwmgr);
+	if (hwmgr->hwmgr_func->get_performance_level == NULL)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level);
+}
+
+/**
+* Gets Clock Info.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @param    state  the address of the Power State structure.
+* @param    pclock_info  the address of the pp_clock_info structure where the result will be returned.
+* @exception PP_Result_Failed if any of the parameters is NULL, otherwise the return value from the back-end.
+*/
+int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info,
+			PHM_PerformanceLevelDesignation designation)
+{
+	int result;
+	PHM_PerformanceLevel performance_level;
+
+	PHM_FUNC_CHECK(hwmgr);
+
+	PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
+	PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);
+
+	result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);
+
+	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);
+
+	pclock_info->min_mem_clk = performance_level.memory_clock;
+	pclock_info->min_eng_clk = performance_level.coreClock;
+	pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;
+
+	result = phm_get_performance_level(hwmgr, state, designation,
+					(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);
+
+	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);
+
+	pclock_info->max_mem_clk = performance_level.memory_clock;
+	pclock_info->max_eng_clk = performance_level.coreClock;
+	pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;
+
+	return 0;
+}
+
+int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
+}
+
+int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
+}
+
+int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
+		return -EINVAL;
+
+	return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
+}
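
All of the phm_* entry points added above share one dispatch pattern: PHM_FUNC_CHECK() validates the hwmgr, a missing back-end hook yields -EINVAL, and everything else is forwarded to the ASIC-specific implementation. A minimal sketch of a hypothetical caller (error handling trimmed):

	struct pp_clock_info info;
	int ret;

	ret = phm_get_clock_info(hwmgr, state, &info,
				 PHM_PerformanceLevelDesignation_Activity);
	if (ret == 0)
		pr_debug("eng clk %u-%u, mem clk %u-%u\n",
			 info.min_eng_clk, info.max_eng_clk,
			 info.min_mem_clk, info.max_mem_clk);
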
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
index b7429a5..b10df32 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
@@ -293,7 +293,7 @@
 	}
 
 	if (factor == 1)
-	return (ConvertToFraction(X));
+		return ConvertToFraction(X);
 
 	fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor));
 
@@ -371,7 +371,7 @@
 	fZERO = ConvertToFraction(0);
 
 	if (Equal(Y, fZERO))
-	return fZERO;
+		return fZERO;
 
 	longlongX = (int64_t)X.full;
 	longlongY = (int64_t)Y.full;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 980d3bf..0d5d837 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -5185,7 +5185,6 @@
 	mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
 	seq_printf(m, "\n [  mclk  ]: %u MHz\n\n [  sclk  ]: %u MHz\n", mclk/100, sclk/100);
 
-
 	offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
 	activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
 	activity_percent += 0x80;
@@ -5193,6 +5192,9 @@
 
 	seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
 
+	seq_printf(m, "uvd    %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+	seq_printf(m, "vce    %sabled\n", data->vce_power_gated ? "dis" : "en");
 }
 
 static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
@@ -6033,6 +6035,125 @@
 				CG_FDO_CTRL2, FDO_PWM_MODE);
 }
 
+static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	*table = (char *)&data->smc_state_table;
+
+	return sizeof(struct SMU72_Discrete_DpmTable);
+}
+
+static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	void *table = (void *)&data->smc_state_table;
+
+	memcpy(table, buf, size);
+
+	return 0;
+}
+
+static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, int level)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+		return -EINVAL;
+
+	switch (type) {
+	case PP_SCLK:
+		if (!data->sclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SCLKDPM_SetEnabledMask,
+					(1 << level));
+		break;
+	case PP_MCLK:
+		if (!data->mclk_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_MCLKDPM_SetEnabledMask,
+					(1 << level));
+		break;
+	case PP_PCIE:
+		if (!data->pcie_dpm_key_disabled)
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_PCIeDPM_ForceLevel,
+					(1 << level));
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, char *buf)
+{
+	struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+	struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+	struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+	struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+	int i, now, size = 0;
+	uint32_t clock, pcie_speed;
+
+	switch (type) {
+	case PP_SCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < sclk_table->count; i++) {
+			if (clock > sclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < sclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, sclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_MCLK:
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+		for (i = 0; i < mclk_table->count; i++) {
+			if (clock > mclk_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < mclk_table->count; i++)
+			size += sprintf(buf + size, "%d: %uMhz %s\n",
+					i, mclk_table->dpm_levels[i].value / 100,
+					(i == now) ? "*" : "");
+		break;
+	case PP_PCIE:
+		pcie_speed = tonga_get_current_pcie_speed(hwmgr);
+		for (i = 0; i < pcie_table->count; i++) {
+			if (pcie_speed != pcie_table->dpm_levels[i].value)
+				continue;
+			break;
+		}
+		now = i;
+
+		for (i = 0; i < pcie_table->count; i++)
+			size += sprintf(buf + size, "%d: %s %s\n", i,
+					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
+					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+					(i == now) ? "*" : "");
+		break;
+	default:
+		break;
+	}
+	return size;
+}
+
 static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
 	.backend_init = &tonga_hwmgr_backend_init,
 	.backend_fini = &tonga_hwmgr_backend_fini,
@@ -6070,6 +6191,10 @@
 	.check_states_equal = tonga_check_states_equal,
 	.set_fan_control_mode = tonga_set_fan_control_mode,
 	.get_fan_control_mode = tonga_get_fan_control_mode,
+	.get_pp_table = tonga_get_pp_table,
+	.set_pp_table = tonga_set_pp_table,
+	.force_clock_level = tonga_force_clock_level,
+	.print_clock_levels = tonga_print_clock_levels,
 };
 
 int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
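
The four new tonga hooks back powerplay's pp_table and pp_dpm_* sysfs plumbing; force_clock_level only acts once the DPM level has been switched to the new AMD_DPM_FORCED_LEVEL_MANUAL mode, and the requested level becomes a single-bit enable mask for the SMC. A rough sketch of the flow (in practice reached through hwmgr->hwmgr_func->force_clock_level, not called directly; clock values are illustrative):

	/* pin SCLK to DPM level 2; a no-op unless manual mode is selected */
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_MANUAL;
	tonga_force_clock_level(hwmgr, PP_SCLK, 2);

	/* print_clock_levels then emits, roughly:
	 *   0: 300Mhz
	 *   1: 608Mhz
	 *   2: 910Mhz *
	 */
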
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index 49168d2..f88d3bb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -28,6 +28,7 @@
 #include "ppatomctrl.h"
 #include "ppinterrupt.h"
 #include "tonga_powertune.h"
+#include "pp_endian.h"
 
 #define TONGA_MAX_HARDWARE_POWERLEVELS 2
 #define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
@@ -386,17 +387,6 @@
 
 #define TONGA_UNUSED_GPIO_PIN                        0x7F
 
-#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
-#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
-
-#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
-#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
-
-#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
-#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
-
-#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
-
 int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
 int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
 int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index e61a3e6..7255f7d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,6 +29,7 @@
 #include "amd_shared.h"
 #include "cgs_common.h"
 
+
 enum amd_pp_event {
 	AMD_PP_EVENT_INITIALIZE = 0,
 	AMD_PP_EVENT_UNINITIALIZE,
@@ -123,6 +124,7 @@
 	AMD_DPM_FORCED_LEVEL_AUTO = 0,
 	AMD_DPM_FORCED_LEVEL_LOW = 1,
 	AMD_DPM_FORCED_LEVEL_HIGH = 2,
+	AMD_DPM_FORCED_LEVEL_MANUAL = 3,
 };
 
 struct amd_pp_init {
@@ -212,12 +214,55 @@
 	uint32_t dce_tolerable_mclk_in_active_latency;
 };
 
-struct amd_pp_dal_clock_info {
+struct amd_pp_simple_clock_info {
 	uint32_t	engine_max_clock;
 	uint32_t	memory_max_clock;
 	uint32_t	level;
 };
 
+enum PP_DAL_POWERLEVEL {
+	PP_DAL_POWERLEVEL_INVALID = 0,
+	PP_DAL_POWERLEVEL_ULTRALOW,
+	PP_DAL_POWERLEVEL_LOW,
+	PP_DAL_POWERLEVEL_NOMINAL,
+	PP_DAL_POWERLEVEL_PERFORMANCE,
+
+	PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW,
+	PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW,
+	PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL,
+	PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE,
+	PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1,
+	PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1,
+	PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1,
+	PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1,
+};
+
+struct amd_pp_clock_info {
+	uint32_t min_engine_clock;
+	uint32_t max_engine_clock;
+	uint32_t min_memory_clock;
+	uint32_t max_memory_clock;
+	uint32_t min_bus_bandwidth;
+	uint32_t max_bus_bandwidth;
+	uint32_t max_engine_clock_in_sr;
+	uint32_t min_engine_clock_in_sr;
+	enum PP_DAL_POWERLEVEL max_clocks_state;
+};
+
+enum amd_pp_clock_type {
+	amd_pp_disp_clock = 1,
+	amd_pp_sys_clock,
+	amd_pp_mem_clock
+};
+
+#define MAX_NUM_CLOCKS 16
+
+struct amd_pp_clocks {
+	uint32_t count;
+	uint32_t clock[MAX_NUM_CLOCKS];
+};
+
 enum {
 	PP_GROUP_UNKNOWN = 0,
 	PP_GROUP_GFX = 1,
@@ -225,6 +270,17 @@
 	PP_GROUP_MAX
 };
 
+enum pp_clock_type {
+	PP_SCLK,
+	PP_MCLK,
+	PP_PCIE,
+};
+
+struct pp_states_info {
+	uint32_t nums;
+	uint32_t states[16];
+};
+
 #define PP_GROUP_MASK        0xF0000000
 #define PP_GROUP_SHIFT       28
 
@@ -278,6 +334,11 @@
 	int (*get_fan_control_mode)(void *handle);
 	int (*set_fan_speed_percent)(void *handle, uint32_t percent);
 	int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
+	int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
+	int (*get_pp_table)(void *handle, char **table);
+	int (*set_pp_table)(void *handle, const char *buf, size_t size);
+	int (*force_clock_level)(void *handle, enum pp_clock_type type, int level);
+	int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
 };
 
 struct amd_powerplay {
@@ -288,12 +349,23 @@
 
 int amd_powerplay_init(struct amd_pp_init *pp_init,
 		       struct amd_powerplay *amd_pp);
+
 int amd_powerplay_fini(void *handle);
 
-int amd_powerplay_display_configuration_change(void *handle, const void *input);
+int amd_powerplay_display_configuration_change(void *handle,
+		const struct amd_pp_display_configuration *input);
 
 int amd_powerplay_get_display_power_level(void *handle,
-		struct amd_pp_dal_clock_info *output);
+		struct amd_pp_simple_clock_info *output);
 
+int amd_powerplay_get_current_clocks(void *handle,
+		struct amd_pp_clock_info *output);
+
+int amd_powerplay_get_clock_by_type(void *handle,
+		enum amd_pp_clock_type type,
+		struct amd_pp_clocks *clocks);
+
+int amd_powerplay_get_display_mode_validation_clocks(void *handle,
+		struct amd_pp_simple_clock_info *output);
 
 #endif /* _AMD_POWERPLAY_H_ */
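
These interface changes replace void * arguments with typed ones, so callers get type checking on the new clock queries. A hypothetical display-side caller (pp_handle and i are assumptions of the example):

	struct amd_pp_clocks clocks;
	uint32_t i;

	if (amd_powerplay_get_clock_by_type(pp_handle,
					    amd_pp_disp_clock, &clocks) == 0)
		for (i = 0; i < clocks.count; i++)
			pr_debug("display clock %u: %u\n", i, clocks.clock[i]);
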
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 91795ef..040d3f7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -31,6 +31,7 @@
 enum amd_dpm_forced_level;
 struct PP_TemperatureRange;
 
+
 struct phm_fan_speed_info {
 	uint32_t min_percent;
 	uint32_t max_percent;
@@ -290,6 +291,15 @@
 	uint32_t engineClockInSR;
 };
 
+struct pp_clock_info {
+	uint32_t min_mem_clk;
+	uint32_t max_mem_clk;
+	uint32_t min_eng_clk;
+	uint32_t max_eng_clk;
+	uint32_t min_bus_bandwidth;
+	uint32_t max_bus_bandwidth;
+};
+
 struct phm_platform_descriptor {
 	uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES];
 	uint32_t vbiosInterruptId;
@@ -323,24 +333,6 @@
 	uint32_t clock[MAX_NUM_CLOCKS];
 };
 
-enum PP_DAL_POWERLEVEL {
-	PP_DAL_POWERLEVEL_INVALID = 0,
-	PP_DAL_POWERLEVEL_ULTRALOW,
-	PP_DAL_POWERLEVEL_LOW,
-	PP_DAL_POWERLEVEL_NOMINAL,
-	PP_DAL_POWERLEVEL_PERFORMANCE,
-
-	PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW,
-	PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW,
-	PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL,
-	PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE,
-	PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1,
-	PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1,
-	PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1,
-	PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1,
-};
-
-
 extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
 extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
 extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
@@ -375,11 +367,25 @@
 		const struct amd_pp_display_configuration *display_config);
 
 extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
-		struct amd_pp_dal_clock_info*info);
+		struct amd_pp_simple_clock_info *info);
 
 extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr);
 
 extern int phm_power_down_asic(struct pp_hwmgr *hwmgr);
 
+extern int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+				PHM_PerformanceLevelDesignation designation, uint32_t index,
+				PHM_PerformanceLevel *level);
+
+extern int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+			struct pp_clock_info *pclock_info,
+			PHM_PerformanceLevelDesignation designation);
+
+extern int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info);
+
+extern int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
+
+extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
+
 #endif /* _HARDWARE_MANAGER_H_ */
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index aeaa3db..928f5a7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -325,8 +325,18 @@
 				bool cc6_disable, bool pstate_disable,
 				bool pstate_switch_disable);
 	int (*get_dal_power_level)(struct pp_hwmgr *hwmgr,
-				   struct amd_pp_dal_clock_info *info);
+			struct amd_pp_simple_clock_info *info);
+	int (*get_performance_level)(struct pp_hwmgr *, const struct pp_hw_power_state *,
+			PHM_PerformanceLevelDesignation, uint32_t, PHM_PerformanceLevel *);
+	int (*get_current_shallow_sleep_clocks)(struct pp_hwmgr *hwmgr,
+				const struct pp_hw_power_state *state, struct pp_clock_info *clock_info);
+	int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
+	int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
 	int (*power_off_asic)(struct pp_hwmgr *hwmgr);
+	int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
+	int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
+	int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, int level);
+	int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
 };
 
 struct pp_table_func {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c b/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h
similarity index 67%
copy from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
copy to drivers/gpu/drm/amd/powerplay/inc/pp_endian.h
index b3839dc2..f49d196 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,22 +19,20 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "priv.h"
 
-static const struct nvkm_subdev_func
-gm204_ibus = {
-	.intr = gk104_ibus_intr,
-};
+#ifndef _PP_ENDIAN_H_
+#define _PP_ENDIAN_H_
 
-int
-gm204_ibus_new(struct nvkm_device *device, int index,
-	       struct nvkm_subdev **pibus)
-{
-	struct nvkm_subdev *ibus;
-	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
-		return -ENOMEM;
-	nvkm_subdev_ctor(&gm204_ibus, device, index, 0, ibus);
-	return 0;
-}
+#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
+#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
+
+#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
+#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
+
+#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
+#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
+
+#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
+
+#endif /* _PP_ENDIAN_H_ */
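
pp_endian.h gives the host/SMC byte-swap helpers a single home instead of per-ASIC copies (the SMC consumes big-endian data); note that the CONVERT_* variants rewrite their argument in place. A one-line illustration with a throwaway local:

	uint32_t val = 0x12345678;

	CONVERT_FROM_HOST_TO_SMC_UL(val);	/* val now holds cpu_to_be32(0x12345678) */
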
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 504f035..fc9e3d1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -32,6 +32,27 @@
 #define smu_lower_32_bits(n) ((uint32_t)(n))
 #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
 
+enum AVFS_BTC_STATUS {
+	AVFS_BTC_BOOT = 0,
+	AVFS_BTC_BOOT_STARTEDSMU,
+	AVFS_LOAD_VIRUS,
+	AVFS_BTC_VIRUS_LOADED,
+	AVFS_BTC_VIRUS_FAIL,
+	AVFS_BTC_COMPLETED_PREVIOUSLY,
+	AVFS_BTC_ENABLEAVFS,
+	AVFS_BTC_STARTED,
+	AVFS_BTC_FAILED,
+	AVFS_BTC_RESTOREVFT_FAILED,
+	AVFS_BTC_SAVEVFT_FAILED,
+	AVFS_BTC_DPMTABLESETUP_FAILED,
+	AVFS_BTC_COMPLETED_UNSAVED,
+	AVFS_BTC_COMPLETED_SAVED,
+	AVFS_BTC_COMPLETED_RESTORED,
+	AVFS_BTC_DISABLED,
+	AVFS_BTC_NOTSUPPORTED,
+	AVFS_BTC_SMUMSG_ERROR
+};
+
 struct pp_smumgr_func {
 	int (*smu_init)(struct pp_smumgr *smumgr);
 	int (*smu_fini)(struct pp_smumgr *smumgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 8cd22d9..b4eb483 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -23,24 +23,6 @@
 #ifndef _FIJI_SMUMANAGER_H_
 #define _FIJI_SMUMANAGER_H_
 
-enum AVFS_BTC_STATUS {
-	AVFS_BTC_BOOT = 0,
-	AVFS_BTC_BOOT_STARTEDSMU,
-	AVFS_LOAD_VIRUS,
-	AVFS_BTC_VIRUS_LOADED,
-	AVFS_BTC_VIRUS_FAIL,
-	AVFS_BTC_STARTED,
-	AVFS_BTC_FAILED,
-	AVFS_BTC_RESTOREVFT_FAILED,
-	AVFS_BTC_SAVEVFT_FAILED,
-	AVFS_BTC_DPMTABLESETUP_FAILED,
-	AVFS_BTC_COMPLETED_UNSAVED,
-	AVFS_BTC_COMPLETED_SAVED,
-	AVFS_BTC_COMPLETED_RESTORED,
-	AVFS_BTC_DISABLED,
-	AVFS_BTC_NOTSUPPORTED,
-	AVFS_BTC_SMUMSG_ERROR
-};
 
 struct fiji_smu_avfs {
 	enum AVFS_BTC_STATUS AvfsBtcStatus;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 8b2becd..a5ff945 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -229,6 +229,14 @@
 	amd_sched_wakeup(entity->sched);
 }
 
+static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_entity *entity =
+		container_of(cb, struct amd_sched_entity, cb);
+	entity->dependency = NULL;
+	fence_put(f);
+}
+
 static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
@@ -251,7 +259,7 @@
 		}
 
 		/* Wait for fence to be scheduled */
-		entity->cb.func = amd_sched_entity_wakeup;
+		entity->cb.func = amd_sched_entity_clear_dep;
 		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
 		return true;
 	}
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 87c78ee..dc115ae 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -84,12 +84,33 @@
 	return true;
 }
 
-static void amd_sched_fence_release(struct fence *f)
+/**
+ * amd_sched_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the fence memory after the RCU grace period.
+ */
+static void amd_sched_fence_free(struct rcu_head *rcu)
 {
+	struct fence *f = container_of(rcu, struct fence, rcu);
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 	kmem_cache_free(sched_fence_slab, fence);
 }
 
+/**
+ * amd_sched_fence_release - callback that fence can be freed
+ *
+ * @fence: fence
+ *
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
+ */
+static void amd_sched_fence_release(struct fence *f)
+{
+	call_rcu(&f->rcu, amd_sched_fence_free);
+}
+
 const struct fence_ops amd_sched_fence_ops = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
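
amd_sched_fence_release() no longer frees the fence synchronously; call_rcu() defers the kmem_cache free past the next grace period, so a lock-free reader may still dereference the fence from within an RCU read-side critical section while the last reference is dropped concurrently. A sketch of the matching reader pattern (ring->fences[idx] stands in for any RCU-protected fence slot; an assumption of this example):

	struct fence *f;

	rcu_read_lock();
	f = rcu_dereference(ring->fences[idx]);
	if (f && !fence_get_rcu(f))
		f = NULL;	/* refcount already hit zero; memory is still valid to read */
	rcu_read_unlock();
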
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
new file mode 100644
index 0000000..eaed454
--- /dev/null
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -0,0 +1,27 @@
+config DRM_ARM
+	bool
+	help
+	  Choose this option to select drivers for ARM's display devices.
+
+config DRM_HDLCD
+	tristate "ARM HDLCD"
+	depends on DRM && OF && (ARM || ARM64)
+	depends on COMMON_CLK
+	select DRM_ARM
+	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+	select DRM_KMS_CMA_HELPER
+	help
+	  Choose this option if you have an ARM High Definition Colour LCD
+	  controller.
+
+	  If M is selected the module will be called hdlcd.
+
+config DRM_HDLCD_SHOW_UNDERRUN
+	bool "Show underrun conditions"
+	depends on DRM_HDLCD
+	default n
+	help
+	  Enable this option to show in red colour the pixels that the
+	  HDLCD device did not fetch from the framebuffer due to
+	  underrun conditions.
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
new file mode 100644
index 0000000..89dcb7b
--- /dev/null
+++ b/drivers/gpu/drm/arm/Makefile
@@ -0,0 +1,2 @@
+hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
+obj-$(CONFIG_DRM_HDLCD)	+= hdlcd.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
new file mode 100644
index 0000000..fef1b04
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ *  Implementation of a CRTC class for the HDLCD driver.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/clk.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+#include <video/videomode.h>
+
+#include "hdlcd_drv.h"
+#include "hdlcd_regs.h"
+
+/*
+ * The HDLCD controller is a dumb RGB streamer that gets connected to
+ * a single HDMI transmitter; in the case of the ARM Models the
+ * controller is emulated by the software that does the actual
+ * rendering.
+ */
+
+static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
+
+/*
+ * Setup the HDLCD registers for decoding the pixels out of the framebuffer
+ */
+static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
+{
+	unsigned int btpp;
+	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+	uint32_t pixel_format;
+	struct simplefb_format *format = NULL;
+	int i;
+
+	pixel_format = crtc->primary->state->fb->pixel_format;
+
+	for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
+		if (supported_formats[i].fourcc == pixel_format)
+			format = &supported_formats[i];
+	}
+
+	if (WARN_ON(!format))
+		return 0;
+
+	/* HDLCD uses 'bytes per pixel', zero means 1 byte */
+	btpp = (format->bits_per_pixel + 7) / 8;
+	hdlcd_write(hdlcd, HDLCD_REG_PIXEL_FORMAT, (btpp - 1) << 3);
+
+	/*
+	 * The format of the HDLCD_REG_<color>_SELECT register is:
+	 *   - bits[23:16] - default value for that color component
+	 *   - bits[11:8]  - number of bits to extract for each color component
+	 *   - bits[4:0]   - index of the lowest bit to extract
+	 *
+	 * The default color value is used when bits[11:8] are zero, when the
+	 * pixel is outside the visible frame area or when there is a
+	 * buffer underrun.
+	 */
+	hdlcd_write(hdlcd, HDLCD_REG_RED_SELECT, format->red.offset |
+#ifdef CONFIG_DRM_HDLCD_SHOW_UNDERRUN
+		    0x00ff0000 |	/* show underruns in red */
+#endif
+		    ((format->red.length & 0xf) << 8));
+	hdlcd_write(hdlcd, HDLCD_REG_GREEN_SELECT, format->green.offset |
+		    ((format->green.length & 0xf) << 8));
+	hdlcd_write(hdlcd, HDLCD_REG_BLUE_SELECT, format->blue.offset |
+		    ((format->blue.length & 0xf) << 8));
+
+	return 0;
+}
+
+static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+	struct drm_display_mode *m = &crtc->state->adjusted_mode;
+	struct videomode vm;
+	unsigned int polarities, line_length, err;
+
+	vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
+	vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
+	vm.vsync_len = m->crtc_vsync_end - m->crtc_vsync_start;
+	vm.hfront_porch = m->crtc_hsync_start - m->crtc_hdisplay;
+	vm.hback_porch = m->crtc_htotal - m->crtc_hsync_end;
+	vm.hsync_len = m->crtc_hsync_end - m->crtc_hsync_start;
+
+	polarities = HDLCD_POLARITY_DATAEN | HDLCD_POLARITY_DATA;
+
+	if (m->flags & DRM_MODE_FLAG_PHSYNC)
+		polarities |= HDLCD_POLARITY_HSYNC;
+	if (m->flags & DRM_MODE_FLAG_PVSYNC)
+		polarities |= HDLCD_POLARITY_VSYNC;
+
+	line_length = crtc->primary->state->fb->pitches[0];
+
+	/* Allow max number of outstanding requests and largest burst size */
+	hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
+		    HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
+
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
+	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
+	hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
+
+	err = hdlcd_set_pxl_fmt(crtc);
+	if (err)
+		return;
+
+	clk_set_rate(hdlcd->clk, m->crtc_clock * 1000);
+}
+
+static void hdlcd_crtc_enable(struct drm_crtc *crtc)
+{
+	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+	clk_prepare_enable(hdlcd->clk);
+	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
+	drm_crtc_vblank_on(crtc);
+}
+
+static void hdlcd_crtc_disable(struct drm_crtc *crtc)
+{
+	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+	if (!crtc->primary->fb)
+		return;
+
+	clk_disable_unprepare(hdlcd->clk);
+	hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+	drm_crtc_vblank_off(crtc);
+}
+
+static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
+				   struct drm_crtc_state *state)
+{
+	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+	struct drm_display_mode *mode = &state->adjusted_mode;
+	long rate, clk_rate = mode->clock * 1000;
+
+	rate = clk_round_rate(hdlcd->clk, clk_rate);
+	if (rate != clk_rate) {
+		/* clock required by mode not supported by hardware */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
+				    struct drm_crtc_state *state)
+{
+	struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+	unsigned long flags;
+
+	if (crtc->state->event) {
+		struct drm_pending_vblank_event *event = crtc->state->event;
+
+		crtc->state->event = NULL;
+		event->pipe = drm_crtc_index(crtc);
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		list_add_tail(&event->base.link, &hdlcd->event_list);
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
+}
+
+static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
+				    struct drm_crtc_state *state)
+{
+}
+
+static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
+	.mode_fixup	= hdlcd_crtc_mode_fixup,
+	.mode_set	= drm_helper_crtc_mode_set,
+	.mode_set_base	= drm_helper_crtc_mode_set_base,
+	.mode_set_nofb	= hdlcd_crtc_mode_set_nofb,
+	.enable		= hdlcd_crtc_enable,
+	.disable	= hdlcd_crtc_disable,
+	.prepare	= hdlcd_crtc_disable,
+	.commit		= hdlcd_crtc_enable,
+	.atomic_check	= hdlcd_crtc_atomic_check,
+	.atomic_begin	= hdlcd_crtc_atomic_begin,
+	.atomic_flush	= hdlcd_crtc_atomic_flush,
+};
+
+static int hdlcd_plane_atomic_check(struct drm_plane *plane,
+				    struct drm_plane_state *state)
+{
+	return 0;
+}
+
+static void hdlcd_plane_atomic_update(struct drm_plane *plane,
+				      struct drm_plane_state *state)
+{
+	struct hdlcd_drm_private *hdlcd;
+	struct drm_gem_cma_object *gem;
+	dma_addr_t scanout_start;
+
+	if (!plane->state->crtc || !plane->state->fb)
+		return;
+
+	hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+	gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
+	scanout_start = gem->paddr;
+	hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
+}
+
+static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
+	.prepare_fb = NULL,
+	.cleanup_fb = NULL,
+	.atomic_check = hdlcd_plane_atomic_check,
+	.atomic_update = hdlcd_plane_atomic_update,
+};
+
+static void hdlcd_plane_destroy(struct drm_plane *plane)
+{
+	drm_plane_helper_disable(plane);
+	drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs hdlcd_plane_funcs = {
+	.update_plane		= drm_atomic_helper_update_plane,
+	.disable_plane		= drm_atomic_helper_disable_plane,
+	.destroy		= hdlcd_plane_destroy,
+	.reset			= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	struct drm_plane *plane = NULL;
+	u32 formats[ARRAY_SIZE(supported_formats)], i;
+	int ret;
+
+	plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+	if (!plane)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
+		formats[i] = supported_formats[i].fourcc;
+
+	ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs,
+				       formats, ARRAY_SIZE(formats),
+				       DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret) {
+		devm_kfree(drm->dev, plane);
+		return ERR_PTR(ret);
+	}
+
+	drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
+	hdlcd->plane = plane;
+
+	return plane;
+}
+
+void hdlcd_crtc_suspend(struct drm_crtc *crtc)
+{
+	hdlcd_crtc_disable(crtc);
+}
+
+void hdlcd_crtc_resume(struct drm_crtc *crtc)
+{
+	hdlcd_crtc_enable(crtc);
+}
+
+int hdlcd_setup_crtc(struct drm_device *drm)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	struct drm_plane *primary;
+	int ret;
+
+	primary = hdlcd_plane_init(drm);
+	if (IS_ERR(primary))
+		return PTR_ERR(primary);
+
+	ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
+					&hdlcd_crtc_funcs, NULL);
+	if (ret) {
+		hdlcd_plane_destroy(primary);
+		devm_kfree(drm->dev, primary);
+		return ret;
+	}
+
+	drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
+	return 0;
+}
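
The <colour>_SELECT programming in hdlcd_set_pxl_fmt() is the one non-obvious register packing in this file: component offset in bits[4:0], component width in bits[11:8], default value in bits[23:16]. Worked for the red channel of XRGB8888 (offset 16, length 8), ignoring the optional underrun colour:

	u32 red_select = 16 | ((8 & 0xf) << 8);	/* 0x00000810 */
	hdlcd_write(hdlcd, HDLCD_REG_RED_SELECT, red_select);
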
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
new file mode 100644
index 0000000..56b829f
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ *  ARM HDLCD Driver
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include "hdlcd_drv.h"
+#include "hdlcd_regs.h"
+
+static int hdlcd_load(struct drm_device *drm, unsigned long flags)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	struct platform_device *pdev = to_platform_device(drm->dev);
+	struct resource *res;
+	u32 version;
+	int ret;
+
+	hdlcd->clk = devm_clk_get(drm->dev, "pxlclk");
+	if (IS_ERR(hdlcd->clk))
+		return PTR_ERR(hdlcd->clk);
+
+#ifdef CONFIG_DEBUG_FS
+	atomic_set(&hdlcd->buffer_underrun_count, 0);
+	atomic_set(&hdlcd->bus_error_count, 0);
+	atomic_set(&hdlcd->vsync_count, 0);
+	atomic_set(&hdlcd->dma_end_count, 0);
+#endif
+
+	INIT_LIST_HEAD(&hdlcd->event_list);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
+	if (IS_ERR(hdlcd->mmio)) {
+		DRM_ERROR("failed to map control registers area\n");
+		ret = PTR_ERR(hdlcd->mmio);
+		hdlcd->mmio = NULL;
+		goto fail;
+	}
+
+	version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
+	if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
+		DRM_ERROR("unknown product id: 0x%x\n", version);
+		ret = -EINVAL;
+		goto fail;
+	}
+	DRM_INFO("found ARM HDLCD version r%dp%d\n",
+		(version & HDLCD_VERSION_MAJOR_MASK) >> 8,
+		version & HDLCD_VERSION_MINOR_MASK);
+
+	/* Get the optional framebuffer memory resource */
+	ret = of_reserved_mem_device_init(drm->dev);
+	if (ret && ret != -ENODEV)
+		goto fail;
+
+	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto setup_fail;
+
+	ret = hdlcd_setup_crtc(drm);
+	if (ret < 0) {
+		DRM_ERROR("failed to create crtc\n");
+		goto setup_fail;
+	}
+
+	pm_runtime_enable(drm->dev);
+
+	pm_runtime_get_sync(drm->dev);
+	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
+	pm_runtime_put_sync(drm->dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to install IRQ handler\n");
+		goto irq_fail;
+	}
+
+	return 0;
+
+irq_fail:
+	drm_crtc_cleanup(&hdlcd->crtc);
+setup_fail:
+	of_reserved_mem_device_release(drm->dev);
+fail:
+	devm_clk_put(drm->dev, hdlcd->clk);
+
+	return ret;
+}
+
+static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+	if (hdlcd->fbdev)
+		drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
+}
+
+static int hdlcd_atomic_commit(struct drm_device *dev,
+			       struct drm_atomic_state *state, bool async)
+{
+	return drm_atomic_helper_commit(dev, state, false);
+}
+
+static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
+	.fb_create = drm_fb_cma_create,
+	.output_poll_changed = hdlcd_fb_output_poll_changed,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = hdlcd_atomic_commit,
+};
+
+static void hdlcd_setup_mode_config(struct drm_device *drm)
+{
+	drm_mode_config_init(drm);
+	drm->mode_config.min_width = 0;
+	drm->mode_config.min_height = 0;
+	drm->mode_config.max_width = HDLCD_MAX_XRES;
+	drm->mode_config.max_height = HDLCD_MAX_YRES;
+	drm->mode_config.funcs = &hdlcd_mode_config_funcs;
+}
+
+static void hdlcd_lastclose(struct drm_device *drm)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+	drm_fbdev_cma_restore_mode(hdlcd->fbdev);
+}
+
+static irqreturn_t hdlcd_irq(int irq, void *arg)
+{
+	struct drm_device *drm = arg;
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	unsigned long irq_status;
+
+	irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS);
+
+#ifdef CONFIG_DEBUG_FS
+	if (irq_status & HDLCD_INTERRUPT_UNDERRUN)
+		atomic_inc(&hdlcd->buffer_underrun_count);
+
+	if (irq_status & HDLCD_INTERRUPT_DMA_END)
+		atomic_inc(&hdlcd->dma_end_count);
+
+	if (irq_status & HDLCD_INTERRUPT_BUS_ERROR)
+		atomic_inc(&hdlcd->bus_error_count);
+
+	if (irq_status & HDLCD_INTERRUPT_VSYNC)
+		atomic_inc(&hdlcd->vsync_count);
+
+#endif
+	if (irq_status & HDLCD_INTERRUPT_VSYNC) {
+		bool events_sent = false;
+		unsigned long flags;
+		struct drm_pending_vblank_event	*e, *t;
+
+		drm_crtc_handle_vblank(&hdlcd->crtc);
+
+		spin_lock_irqsave(&drm->event_lock, flags);
+		list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
+			list_del(&e->base.link);
+			drm_crtc_send_vblank_event(&hdlcd->crtc, e);
+			events_sent = true;
+		}
+		if (events_sent)
+			drm_crtc_vblank_put(&hdlcd->crtc);
+		spin_unlock_irqrestore(&drm->event_lock, flags);
+	}
+
+	/* acknowledge interrupt(s) */
+	hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
+
+	return IRQ_HANDLED;
+}
+
+static void hdlcd_irq_preinstall(struct drm_device *drm)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	/* Ensure interrupts are disabled */
+	hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
+	hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0);
+}
+
+static int hdlcd_irq_postinstall(struct drm_device *drm)
+{
+#ifdef CONFIG_DEBUG_FS
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+	/* enable debug interrupts */
+	irq_mask |= HDLCD_DEBUG_INT_MASK;
+
+	hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
+#endif
+	return 0;
+}
+
+static void hdlcd_irq_uninstall(struct drm_device *drm)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	/* disable all the interrupts that we might have enabled */
+	unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+#ifdef CONFIG_DEBUG_FS
+	/* disable debug interrupts */
+	irq_mask &= ~HDLCD_DEBUG_INT_MASK;
+#endif
+
+	/* disable vsync interrupts */
+	irq_mask &= ~HDLCD_INTERRUPT_VSYNC;
+
+	hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
+}
+
+static int hdlcd_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+	hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask | HDLCD_INTERRUPT_VSYNC);
+
+	return 0;
+}
+
+static void hdlcd_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+	hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask & ~HDLCD_INTERRUPT_VSYNC);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *drm = node->minor->dev;
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+	seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
+	seq_printf(m, "dma_end  : %d\n", atomic_read(&hdlcd->dma_end_count));
+	seq_printf(m, "bus_error: %d\n", atomic_read(&hdlcd->bus_error_count));
+	seq_printf(m, "vsync    : %d\n", atomic_read(&hdlcd->vsync_count));
+	return 0;
+}
+
+static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *drm = node->minor->dev;
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	unsigned long clkrate = clk_get_rate(hdlcd->clk);
+	unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
+
+	seq_printf(m, "hw  : %lu\n", clkrate);
+	seq_printf(m, "mode: %lu\n", mode_clock);
+	return 0;
+}
+
+static struct drm_info_list hdlcd_debugfs_list[] = {
+	{ "interrupt_count", hdlcd_show_underrun_count, 0 },
+	{ "clocks", hdlcd_show_pxlclock, 0 },
+};
+
+static int hdlcd_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(hdlcd_debugfs_list,
+		ARRAY_SIZE(hdlcd_debugfs_list),	minor->debugfs_root, minor);
+}
+
+static void hdlcd_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(hdlcd_debugfs_list,
+		ARRAY_SIZE(hdlcd_debugfs_list), minor);
+}
+#endif
+
+static const struct file_operations fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= noop_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver hdlcd_driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+			   DRIVER_MODESET | DRIVER_PRIME |
+			   DRIVER_ATOMIC,
+	.lastclose = hdlcd_lastclose,
+	.irq_handler = hdlcd_irq,
+	.irq_preinstall = hdlcd_irq_preinstall,
+	.irq_postinstall = hdlcd_irq_postinstall,
+	.irq_uninstall = hdlcd_irq_uninstall,
+	.get_vblank_counter = drm_vblank_no_hw_counter,
+	.enable_vblank = hdlcd_enable_vblank,
+	.disable_vblank = hdlcd_disable_vblank,
+	.gem_free_object = drm_gem_cma_free_object,
+	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.dumb_create = drm_gem_cma_dumb_create,
+	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap = drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init = hdlcd_debugfs_init,
+	.debugfs_cleanup = hdlcd_debugfs_cleanup,
+#endif
+	.fops = &fops,
+	.name = "hdlcd",
+	.desc = "ARM HDLCD Controller DRM",
+	.date = "20151021",
+	.major = 1,
+	.minor = 0,
+};
+
+static int hdlcd_drm_bind(struct device *dev)
+{
+	struct drm_device *drm;
+	struct hdlcd_drm_private *hdlcd;
+	int ret;
+
+	hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL);
+	if (!hdlcd)
+		return -ENOMEM;
+
+	drm = drm_dev_alloc(&hdlcd_driver, dev);
+	if (!drm)
+		return -ENOMEM;
+
+	drm->dev_private = hdlcd;
+	hdlcd_setup_mode_config(drm);
+	ret = hdlcd_load(drm, 0);
+	if (ret)
+		goto err_free;
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto err_unload;
+
+	dev_set_drvdata(dev, drm);
+
+	ret = component_bind_all(dev, drm);
+	if (ret) {
+		DRM_ERROR("Failed to bind all components\n");
+		goto err_unregister;
+	}
+
+	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (ret < 0) {
+		DRM_ERROR("failed to initialise vblank\n");
+		goto err_vblank;
+	}
+	drm->vblank_disable_allowed = true;
+
+	drm_mode_config_reset(drm);
+	drm_kms_helper_poll_init(drm);
+
+	hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+					  drm->mode_config.num_connector);
+
+	if (IS_ERR(hdlcd->fbdev)) {
+		ret = PTR_ERR(hdlcd->fbdev);
+		hdlcd->fbdev = NULL;
+		goto err_fbdev;
+	}
+
+	return 0;
+
+err_fbdev:
+	drm_kms_helper_poll_fini(drm);
+	drm_mode_config_cleanup(drm);
+	drm_vblank_cleanup(drm);
+err_vblank:
+	component_unbind_all(dev, drm);
+err_unregister:
+	drm_dev_unregister(drm);
+err_unload:
+	pm_runtime_get_sync(drm->dev);
+	drm_irq_uninstall(drm);
+	pm_runtime_put_sync(drm->dev);
+	pm_runtime_disable(drm->dev);
+	of_reserved_mem_device_release(drm->dev);
+	devm_clk_put(dev, hdlcd->clk);
+err_free:
+	drm_dev_unref(drm);
+
+	return ret;
+}
+
+static void hdlcd_drm_unbind(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+	if (hdlcd->fbdev) {
+		drm_fbdev_cma_fini(hdlcd->fbdev);
+		hdlcd->fbdev = NULL;
+	}
+	drm_kms_helper_poll_fini(drm);
+	component_unbind_all(dev, drm);
+	drm_vblank_cleanup(drm);
+	pm_runtime_get_sync(drm->dev);
+	drm_irq_uninstall(drm);
+	pm_runtime_put_sync(drm->dev);
+	pm_runtime_disable(drm->dev);
+	of_reserved_mem_device_release(drm->dev);
+	if (!IS_ERR(hdlcd->clk)) {
+		devm_clk_put(drm->dev, hdlcd->clk);
+		hdlcd->clk = NULL;
+	}
+	drm_mode_config_cleanup(drm);
+	drm_dev_unregister(drm);
+	drm_dev_unref(drm);
+	drm->dev_private = NULL;
+	dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops hdlcd_master_ops = {
+	.bind		= hdlcd_drm_bind,
+	.unbind		= hdlcd_drm_unbind,
+};
+
+static int compare_dev(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int hdlcd_probe(struct platform_device *pdev)
+{
+	struct device_node *port, *ep;
+	struct component_match *match = NULL;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	/* there is only one output port inside each device, find it */
+	ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+	if (!ep)
+		return -ENODEV;
+
+	if (!of_device_is_available(ep)) {
+		of_node_put(ep);
+		return -ENODEV;
+	}
+
+	/* add the remote encoder port as component */
+	port = of_graph_get_remote_port_parent(ep);
+	of_node_put(ep);
+	if (!port || !of_device_is_available(port)) {
+		of_node_put(port);
+		return -EAGAIN;
+	}
+
+	component_match_add(&pdev->dev, &match, compare_dev, port);
+
+	return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
+					       match);
+}
+
+static int hdlcd_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &hdlcd_master_ops);
+	return 0;
+}
+
+static const struct of_device_id  hdlcd_of_match[] = {
+	{ .compatible	= "arm,hdlcd" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, hdlcd_of_match);
+
+static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct drm_crtc *crtc;
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	drm_modeset_lock_all(drm);
+	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+		hdlcd_crtc_suspend(crtc);
+	drm_modeset_unlock_all(drm);
+	return 0;
+}
+
+static int __maybe_unused hdlcd_pm_resume(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct drm_crtc *crtc;
+
+	if (!pm_runtime_suspended(dev))
+		return 0;
+
+	drm_modeset_lock_all(drm);
+	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+		hdlcd_crtc_resume(crtc);
+	drm_modeset_unlock_all(drm);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
+
+static struct platform_driver hdlcd_platform_driver = {
+	.probe		= hdlcd_probe,
+	.remove		= hdlcd_remove,
+	.driver	= {
+		.name = "hdlcd",
+		.pm = &hdlcd_pm_ops,
+		.of_match_table	= hdlcd_of_match,
+	},
+};
+
+module_platform_driver(hdlcd_platform_driver);
+
+MODULE_AUTHOR("Liviu Dudau");
+MODULE_DESCRIPTION("ARM HDLCD DRM driver");
+MODULE_LICENSE("GPL v2");
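
hdlcd_probe() bails out with -EAGAIN until the remote encoder port described by the device tree graph is available, then waits for it to register as a component. The counterpart on the encoder side would be a component_add() call; a hypothetical sketch, where every my_* name is a placeholder:

	static const struct component_ops my_encoder_component_ops = {
		.bind	= my_encoder_bind,	/* placeholder callbacks */
		.unbind	= my_encoder_unbind,
	};

	static int my_encoder_probe(struct platform_device *pdev)
	{
		return component_add(&pdev->dev, &my_encoder_component_ops);
	}
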
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
new file mode 100644
index 0000000..aa23478
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -0,0 +1,42 @@
+/*
+ *  ARM HDLCD Controller register definition
+ */
+
+#ifndef __HDLCD_DRV_H__
+#define __HDLCD_DRV_H__
+
+struct hdlcd_drm_private {
+	void __iomem			*mmio;
+	struct clk			*clk;
+	struct drm_fbdev_cma		*fbdev;
+	struct drm_framebuffer		*fb;
+	struct list_head		event_list;
+	struct drm_crtc			crtc;
+	struct drm_plane		*plane;
+#ifdef CONFIG_DEBUG_FS
+	atomic_t buffer_underrun_count;
+	atomic_t bus_error_count;
+	atomic_t vsync_count;
+	atomic_t dma_end_count;
+#endif
+};
+
+#define crtc_to_hdlcd_priv(x)	container_of(x, struct hdlcd_drm_private, crtc)
+
+static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd,
+			       unsigned int reg, u32 value)
+{
+	writel(value, hdlcd->mmio + reg);
+}
+
+static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg)
+{
+	return readl(hdlcd->mmio + reg);
+}
+
+int hdlcd_setup_crtc(struct drm_device *dev);
+void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
+void hdlcd_crtc_suspend(struct drm_crtc *crtc);
+void hdlcd_crtc_resume(struct drm_crtc *crtc);
+
+#endif /* __HDLCD_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/hdlcd_regs.h b/drivers/gpu/drm/arm/hdlcd_regs.h
new file mode 100644
index 0000000..66799eb
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_regs.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013,2014 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ *  ARM HDLCD Controller register definition
+ */
+
+#ifndef __HDLCD_REGS_H__
+#define __HDLCD_REGS_H__
+
+/* register offsets */
+#define HDLCD_REG_VERSION		0x0000	/* ro */
+#define HDLCD_REG_INT_RAWSTAT		0x0010	/* rw */
+#define HDLCD_REG_INT_CLEAR		0x0014	/* wo */
+#define HDLCD_REG_INT_MASK		0x0018	/* rw */
+#define HDLCD_REG_INT_STATUS		0x001c	/* ro */
+#define HDLCD_REG_FB_BASE		0x0100	/* rw */
+#define HDLCD_REG_FB_LINE_LENGTH	0x0104	/* rw */
+#define HDLCD_REG_FB_LINE_COUNT		0x0108	/* rw */
+#define HDLCD_REG_FB_LINE_PITCH		0x010c	/* rw */
+#define HDLCD_REG_BUS_OPTIONS		0x0110	/* rw */
+#define HDLCD_REG_V_SYNC		0x0200	/* rw */
+#define HDLCD_REG_V_BACK_PORCH		0x0204	/* rw */
+#define HDLCD_REG_V_DATA		0x0208	/* rw */
+#define HDLCD_REG_V_FRONT_PORCH		0x020c	/* rw */
+#define HDLCD_REG_H_SYNC		0x0210	/* rw */
+#define HDLCD_REG_H_BACK_PORCH		0x0214	/* rw */
+#define HDLCD_REG_H_DATA		0x0218	/* rw */
+#define HDLCD_REG_H_FRONT_PORCH		0x021c	/* rw */
+#define HDLCD_REG_POLARITIES		0x0220	/* rw */
+#define HDLCD_REG_COMMAND		0x0230	/* rw */
+#define HDLCD_REG_PIXEL_FORMAT		0x0240	/* rw */
+#define HDLCD_REG_RED_SELECT		0x0244	/* rw */
+#define HDLCD_REG_GREEN_SELECT		0x0248	/* rw */
+#define HDLCD_REG_BLUE_SELECT		0x024c	/* rw */
+
+/* version */
+#define HDLCD_PRODUCT_ID		0x1CDC0000
+#define HDLCD_PRODUCT_MASK		0xFFFF0000
+#define HDLCD_VERSION_MAJOR_MASK	0x0000FF00
+#define HDLCD_VERSION_MINOR_MASK	0x000000FF
+
+/* interrupts */
+#define HDLCD_INTERRUPT_DMA_END		(1 << 0)
+#define HDLCD_INTERRUPT_BUS_ERROR	(1 << 1)
+#define HDLCD_INTERRUPT_VSYNC		(1 << 2)
+#define HDLCD_INTERRUPT_UNDERRUN	(1 << 3)
+#define HDLCD_DEBUG_INT_MASK		(HDLCD_INTERRUPT_DMA_END |  \
+					HDLCD_INTERRUPT_BUS_ERROR | \
+					HDLCD_INTERRUPT_UNDERRUN)
+
+/* polarities */
+#define HDLCD_POLARITY_VSYNC		(1 << 0)
+#define HDLCD_POLARITY_HSYNC		(1 << 1)
+#define HDLCD_POLARITY_DATAEN		(1 << 2)
+#define HDLCD_POLARITY_DATA		(1 << 3)
+#define HDLCD_POLARITY_PIXELCLK		(1 << 4)
+
+/* commands */
+#define HDLCD_COMMAND_DISABLE		(0 << 0)
+#define HDLCD_COMMAND_ENABLE		(1 << 0)
+
+/* pixel format */
+#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN	(0 << 31)
+#define HDLCD_PIXEL_FMT_BIG_ENDIAN	(1 << 31)
+#define HDLCD_BYTES_PER_PIXEL_MASK	(3 << 3)
+
+/* bus options */
+#define HDLCD_BUS_BURST_MASK		0x01f
+#define HDLCD_BUS_MAX_OUTSTAND		0xf00
+#define HDLCD_BUS_BURST_NONE		(0 << 0)
+#define HDLCD_BUS_BURST_1		(1 << 0)
+#define HDLCD_BUS_BURST_2		(1 << 1)
+#define HDLCD_BUS_BURST_4		(1 << 2)
+#define HDLCD_BUS_BURST_8		(1 << 3)
+#define HDLCD_BUS_BURST_16		(1 << 4)
+
+/* Max resolution supported is 4096x4096, 32bpp */
+#define HDLCD_MAX_XRES			4096
+#define HDLCD_MAX_YRES			4096
+
+#define NR_PALETTE			256
+
+#endif /* __HDLCD_REGS_H__ */
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 3bd7e1c..82043c2 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -188,9 +188,6 @@
 
 static struct drm_driver armada_drm_driver = {
 	.load			= armada_drm_load,
-	.open			= NULL,
-	.preclose		= NULL,
-	.postclose		= NULL,
 	.lastclose		= armada_drm_lastclose,
 	.unload			= armada_drm_unload,
 	.set_busid		= drm_platform_set_busid,
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 0123458..a965e7e 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -497,13 +497,6 @@
 	}
 }
 
-static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 /* ast is different - we will force move buffers out of VRAM */
 static int ast_crtc_do_set_base(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
@@ -617,7 +610,6 @@
 
 static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
 	.dpms = ast_crtc_dpms,
-	.mode_fixup = ast_crtc_mode_fixup,
 	.mode_set = ast_crtc_mode_set,
 	.mode_set_base = ast_crtc_mode_set_base,
 	.disable = ast_crtc_disable,
@@ -710,13 +702,6 @@
 
 }
 
-static bool ast_mode_fixup(struct drm_encoder *encoder,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void ast_encoder_mode_set(struct drm_encoder *encoder,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode)
@@ -736,7 +721,6 @@
 
 static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
 	.dpms = ast_encoder_dpms,
-	.mode_fixup = ast_mode_fixup,
 	.prepare = ast_encoder_prepare,
 	.commit = ast_encoder_commit,
 	.mode_set = ast_encoder_mode_set,
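
The ast changes above, and the atmel, bochs, dw-hdmi and cirrus hunks below, all delete mode_fixup implementations that did nothing but return true; the drm helpers now treat a NULL mode_fixup as "no fixup needed", so the boilerplate can go. Roughly the helper-side shape (a sketch, not the literal core code):

	if (crtc_funcs->mode_fixup &&
	    !crtc_funcs->mode_fixup(crtc, mode, adjusted_mode)) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto done;
	}
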
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 468a14f..58c4f78 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -121,13 +121,6 @@
 			   cfg);
 }
 
-static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
-					const struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
 {
 	struct drm_device *dev = c->dev;
@@ -261,7 +254,6 @@
 }
 
 static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
-	.mode_fixup = atmel_hlcdc_crtc_mode_fixup,
 	.mode_set = drm_helper_crtc_mode_set,
 	.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
 	.mode_set_base = drm_helper_crtc_mode_set_base,
@@ -280,24 +272,6 @@
 	kfree(crtc);
 }
 
-void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *c,
-				       struct drm_file *file)
-{
-	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
-	struct drm_pending_vblank_event *event;
-	struct drm_device *dev = c->dev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	event = crtc->event;
-	if (event && event->base.file_priv == file) {
-		event->base.destroy(&event->base);
-		drm_vblank_put(dev, crtc->id);
-		crtc->event = NULL;
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -367,4 +341,3 @@
 	atmel_hlcdc_crtc_destroy(&crtc->base);
 	return ret;
 }
-
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index a45b32b..3d8d164 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -619,15 +619,6 @@
 	mutex_unlock(&dev->mode_config.mutex);
 }
 
-static void atmel_hlcdc_dc_preclose(struct drm_device *dev,
-				    struct drm_file *file)
-{
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		atmel_hlcdc_crtc_cancel_page_flip(crtc, file);
-}
-
 static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
 {
 	struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -698,7 +689,6 @@
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
 			   DRIVER_MODESET | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
-	.preclose = atmel_hlcdc_dc_preclose,
 	.lastclose = atmel_hlcdc_dc_lastclose,
 	.irq_handler = atmel_hlcdc_dc_irq_handler,
 	.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index cf6b375..fed517f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -152,9 +152,6 @@
 
 void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
 
-void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
-				       struct drm_file *file);
-
 void atmel_hlcdc_crtc_suspend(struct drm_crtc *crtc);
 void atmel_hlcdc_crtc_resume(struct drm_crtc *crtc);
 
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 2849f1b..96926f0 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -30,13 +30,6 @@
 	}
 }
 
-static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 				    struct drm_framebuffer *old_fb)
 {
@@ -135,7 +128,6 @@
 
 static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
 	.dpms = bochs_crtc_dpms,
-	.mode_fixup = bochs_crtc_mode_fixup,
 	.mode_set = bochs_crtc_mode_set,
 	.mode_set_base = bochs_crtc_mode_set_base,
 	.prepare = bochs_crtc_prepare,
@@ -152,13 +144,6 @@
 	drm_crtc_helper_add(crtc, &bochs_helper_funcs);
 }
 
-static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder,
-				     const struct drm_display_mode *mode,
-				     struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void bochs_encoder_mode_set(struct drm_encoder *encoder,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
@@ -179,7 +164,6 @@
 
 static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
 	.dpms = bochs_encoder_dpms,
-	.mode_fixup = bochs_encoder_mode_fixup,
 	.mode_set = bochs_encoder_mode_set,
 	.prepare = bochs_encoder_prepare,
 	.commit = bochs_encoder_commit,
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index b0aac473..9795b72 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1391,13 +1391,6 @@
 	mutex_unlock(&hdmi->mutex);
 }
 
-static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
-				      const struct drm_display_mode *mode,
-				      struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
@@ -1546,7 +1539,6 @@
 	.pre_enable = dw_hdmi_bridge_nop,
 	.post_disable = dw_hdmi_bridge_nop,
 	.mode_set = dw_hdmi_bridge_mode_set,
-	.mode_fixup = dw_hdmi_bridge_mode_fixup,
 };
 
 static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 4a02854..d3d8d7b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -91,18 +91,6 @@
 	WREG_GFX(0xe, gr0e);
 }
 
-/*
- * The core passes the desired mode to the CRTC code to see whether any
- * CRTC-specific modifications need to be made to it. We're in a position
- * to just pass that straight through, so this does nothing
- */
-static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
-				   const struct drm_display_mode *mode,
-				   struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
 	struct cirrus_device *cdev = crtc->dev->dev_private;
@@ -372,7 +360,6 @@
 
 static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
 	.dpms = cirrus_crtc_dpms,
-	.mode_fixup = cirrus_crtc_mode_fixup,
 	.mode_set = cirrus_crtc_mode_set,
 	.mode_set_base = cirrus_crtc_mode_set_base,
 	.prepare = cirrus_crtc_prepare,
@@ -430,14 +417,6 @@
 	*blue = cirrus_crtc->lut_b[regno];
 }
 
-
-static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
-				      const struct drm_display_mode *mode,
-				      struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
@@ -466,7 +445,6 @@
 
 static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
 	.dpms = cirrus_encoder_dpms,
-	.mode_fixup = cirrus_encoder_mode_fixup,
 	.mode_set = cirrus_encoder_mode_set,
 	.prepare = cirrus_encoder_prepare,
 	.commit = cirrus_encoder_commit,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9a7b446..a2596eb 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -28,6 +28,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>
 
 /**
@@ -376,6 +377,59 @@
 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
 
 /**
+ * drm_atomic_replace_property_blob - replace a blob property
+ * @blob: a pointer to the member blob to be replaced
+ * @new_blob: the new blob to replace with
+ * @replaced: whether the blob has been replaced
+ */
+static void
+drm_atomic_replace_property_blob(struct drm_property_blob **blob,
+				 struct drm_property_blob *new_blob,
+				 bool *replaced)
+{
+	struct drm_property_blob *old_blob = *blob;
+
+	if (old_blob == new_blob)
+		return;
+
+	if (old_blob)
+		drm_property_unreference_blob(old_blob);
+	if (new_blob)
+		drm_property_reference_blob(new_blob);
+	*blob = new_blob;
+	*replaced = true;
+}
+
+static int
+drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
+					 struct drm_property_blob **blob,
+					 uint64_t blob_id,
+					 ssize_t expected_size,
+					 bool *replaced)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_property_blob *new_blob = NULL;
+
+	if (blob_id != 0) {
+		new_blob = drm_property_lookup_blob(dev, blob_id);
+		if (new_blob == NULL)
+			return -EINVAL;
+		if (expected_size > 0 && expected_size != new_blob->length) {
+			drm_property_unreference_blob(new_blob);
+			return -EINVAL;
+		}
+	}
+
+	drm_atomic_replace_property_blob(blob, new_blob, replaced);
+	drm_property_unreference_blob(new_blob);
+
+	return 0;
+}
+
+/**
  * drm_atomic_crtc_set_property - set property on CRTC
  * @crtc: the drm CRTC to set a property on
  * @state: the state object to update with the new property value
@@ -397,6 +451,7 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_mode_config *config = &dev->mode_config;
+	bool replaced = false;
 	int ret;
 
 	if (property == config->prop_active)
@@ -407,8 +462,31 @@
 		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
 		drm_property_unreference_blob(mode);
 		return ret;
-	}
-	else if (crtc->funcs->atomic_set_property)
+	} else if (property == config->degamma_lut_property) {
+		ret = drm_atomic_replace_property_blob_from_id(crtc,
+					&state->degamma_lut,
+					val,
+					-1,
+					&replaced);
+		state->color_mgmt_changed = replaced;
+		return ret;
+	} else if (property == config->ctm_property) {
+		ret = drm_atomic_replace_property_blob_from_id(crtc,
+					&state->ctm,
+					val,
+					sizeof(struct drm_color_ctm),
+					&replaced);
+		state->color_mgmt_changed = replaced;
+		return ret;
+	} else if (property == config->gamma_lut_property) {
+		ret = drm_atomic_replace_property_blob_from_id(crtc,
+					&state->gamma_lut,
+					val,
+					-1,
+					&replaced);
+		state->color_mgmt_changed = replaced;
+		return ret;
+	} else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
 	else
 		return -EINVAL;
@@ -444,6 +522,12 @@
 		*val = state->active;
 	else if (property == config->prop_mode_id)
 		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+	else if (property == config->degamma_lut_property)
+		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
+	else if (property == config->ctm_property)
+		*val = (state->ctm) ? state->ctm->base.id : 0;
+	else if (property == config->gamma_lut_property)
+		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
 	else if (crtc->funcs->atomic_get_property)
 		return crtc->funcs->atomic_get_property(crtc, state, property, val);
 	else
@@ -1343,46 +1427,25 @@
 		struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (file_priv->event_space < sizeof e->event) {
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		goto out;
-	}
-	file_priv->event_space -= sizeof e->event;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	int ret;
 
 	e = kzalloc(sizeof *e, GFP_KERNEL);
-	if (e == NULL) {
-		spin_lock_irqsave(&dev->event_lock, flags);
-		file_priv->event_space += sizeof e->event;
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		goto out;
-	}
+	if (!e)
+		return NULL;
 
 	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
-	e->event.base.length = sizeof e->event;
+	e->event.base.length = sizeof(e->event);
 	e->event.user_data = user_data;
-	e->base.event = &e->event.base;
-	e->base.file_priv = file_priv;
-	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
-out:
+	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+	if (ret) {
+		kfree(e);
+		return NULL;
+	}
+
 	return e;
 }
 
-static void destroy_vblank_event(struct drm_device *dev,
-		struct drm_file *file_priv, struct drm_pending_vblank_event *e)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	file_priv->event_space += sizeof e->event;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-	kfree(e);
-}
-
 static int atomic_set_prop(struct drm_atomic_state *state,
 		struct drm_mode_object *obj, struct drm_property *prop,
 		uint64_t prop_value)
@@ -1642,8 +1705,7 @@
 			if (!crtc_state->event)
 				continue;
 
-			destroy_vblank_event(dev, file_priv,
-					     crtc_state->event);
+			drm_event_cancel_free(dev, &crtc_state->event->base);
 		}
 	}
 
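The blob-replacement helpers added above encode one invariant: drop the old
reference, take a new one, and only mark the state changed when the pointer
actually moves. A minimal standalone sketch of that pattern, assuming a toy
refcount type (blob_ref/blob_unref and replace_blob are invented names, not
kernel API):

    #include <assert.h>

    struct blob { int refcount; };          /* stand-in for drm_property_blob */

    static void blob_ref(struct blob *b)   { if (b) b->refcount++; }
    static void blob_unref(struct blob *b) { if (b) b->refcount--; }

    /* Mirrors drm_atomic_replace_property_blob(): swap the pointer and adjust
     * the references, recording whether anything actually changed. */
    static void replace_blob(struct blob **slot, struct blob *new_blob,
                             int *replaced)
    {
            struct blob *old = *slot;

            if (old == new_blob)
                    return;             /* no-op swap must not touch refcounts */

            blob_unref(old);
            blob_ref(new_blob);
            *slot = new_blob;
            *replaced = 1;
    }

    int main(void)
    {
            struct blob a = { .refcount = 1 }, b = { .refcount = 1 };
            struct blob *slot = &a;
            int replaced = 0;

            replace_blob(&slot, &b, &replaced);
            assert(slot == &b && replaced && a.refcount == 0 && b.refcount == 2);

            replaced = 0;
            replace_blob(&slot, &b, &replaced); /* same blob: nothing changes */
            assert(!replaced && b.refcount == 2);
            return 0;
    }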
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4f2d3e1..2bb90fa 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -86,110 +86,185 @@
 	}
 }
 
-static bool
-check_pending_encoder_assignment(struct drm_atomic_state *state,
-				 struct drm_encoder *new_encoder)
+static int handle_conflicting_encoders(struct drm_atomic_state *state,
+				       bool disable_conflicting_encoders)
 {
-	struct drm_connector *connector;
 	struct drm_connector_state *conn_state;
-	int i;
-
-	for_each_connector_in_state(state, connector, conn_state, i) {
-		if (conn_state->best_encoder != new_encoder)
-			continue;
-
-		/* encoder already assigned and we're trying to re-steal it! */
-		if (connector->state->best_encoder != conn_state->best_encoder)
-			return false;
-	}
-
-	return true;
-}
-
-static struct drm_crtc *
-get_current_crtc_for_encoder(struct drm_device *dev,
-			     struct drm_encoder *encoder)
-{
-	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_connector *connector;
-
-	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
-	drm_for_each_connector(connector, dev) {
-		if (connector->state->best_encoder != encoder)
-			continue;
-
-		return connector->state->crtc;
-	}
-
-	return NULL;
-}
-
-static int
-steal_encoder(struct drm_atomic_state *state,
-	      struct drm_encoder *encoder,
-	      struct drm_crtc *encoder_crtc)
-{
-	struct drm_mode_config *config = &state->dev->mode_config;
-	struct drm_crtc_state *crtc_state;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int ret;
+	struct drm_encoder *encoder;
+	unsigned encoder_mask = 0;
+	int i, ret;
 
 	/*
-	 * We can only steal an encoder coming from a connector, which means we
-	 * must already hold the connection_mutex.
+	 * First loop, find all newly assigned encoders from the connectors
+	 * part of the state. If the same encoder is assigned to multiple
+	 * connectors bail out.
 	 */
-	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+	for_each_connector_in_state(state, connector, conn_state, i) {
+		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+		struct drm_encoder *new_encoder;
 
-	DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
-			 encoder->base.id, encoder->name,
-			 encoder_crtc->base.id, encoder_crtc->name);
-
-	crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
-	if (IS_ERR(crtc_state))
-		return PTR_ERR(crtc_state);
-
-	crtc_state->connectors_changed = true;
-
-	list_for_each_entry(connector, &config->connector_list, head) {
-		if (connector->state->best_encoder != encoder)
+		if (!conn_state->crtc)
 			continue;
 
-		DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
-				 connector->base.id,
-				 connector->name);
+		if (funcs->atomic_best_encoder)
+			new_encoder = funcs->atomic_best_encoder(connector, conn_state);
+		else
+			new_encoder = funcs->best_encoder(connector);
 
-		connector_state = drm_atomic_get_connector_state(state,
-								 connector);
-		if (IS_ERR(connector_state))
-			return PTR_ERR(connector_state);
+		if (new_encoder) {
+			if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
+					new_encoder->base.id, new_encoder->name,
+					connector->base.id, connector->name);
 
-		ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
+				return -EINVAL;
+			}
+
+			encoder_mask |= 1 << drm_encoder_index(new_encoder);
+		}
+	}
+
+	if (!encoder_mask)
+		return 0;
+
+	/*
+	 * Second loop, iterate over all connectors not part of the state.
+	 *
+	 * If a conflicting encoder is found and disable_conflicting_encoders
+	 * is not set, an error is returned. Userspace can provide a solution
+	 * through the atomic ioctl.
+	 *
+	 * If the flag is set conflicting connectors are removed from the crtc
+	 * and the crtc is disabled if no encoder is left. This preserves
+	 * compatibility with the legacy set_config behavior.
+	 */
+	drm_for_each_connector(connector, state->dev) {
+		struct drm_crtc_state *crtc_state;
+
+		if (drm_atomic_get_existing_connector_state(state, connector))
+			continue;
+
+		encoder = connector->state->best_encoder;
+		if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
+			continue;
+
+		if (!disable_conflicting_encoders) {
+			DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
+					 encoder->base.id, encoder->name,
+					 connector->state->crtc->base.id,
+					 connector->state->crtc->name,
+					 connector->base.id, connector->name);
+			return -EINVAL;
+		}
+
+		conn_state = drm_atomic_get_connector_state(state, connector);
+		if (IS_ERR(conn_state))
+			return PTR_ERR(conn_state);
+
+		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
+				 encoder->base.id, encoder->name,
+				 conn_state->crtc->base.id, conn_state->crtc->name,
+				 connector->base.id, connector->name);
+
+		crtc_state = drm_atomic_get_existing_crtc_state(state, conn_state->crtc);
+
+		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
 		if (ret)
 			return ret;
-		connector_state->best_encoder = NULL;
+
+		if (!crtc_state->connector_mask) {
+			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
+								NULL);
+			if (ret < 0)
+				return ret;
+
+			crtc_state->active = false;
+		}
 	}
 
 	return 0;
 }
 
+static void
+set_best_encoder(struct drm_atomic_state *state,
+		 struct drm_connector_state *conn_state,
+		 struct drm_encoder *encoder)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+
+	if (conn_state->best_encoder) {
+		/* Unset the encoder_mask in the old crtc state. */
+		crtc = conn_state->connector->state->crtc;
+
+		/* A NULL crtc is an error here because we should have
+		 * duplicated a NULL best_encoder when crtc was NULL.
+		 * As an exception restoring duplicated atomic state
+		 * during resume is allowed, so don't warn when
+		 * best_encoder is equal to encoder we intend to set.
+		 */
+		WARN_ON(!crtc && encoder != conn_state->best_encoder);
+		if (crtc) {
+			crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+
+			crtc_state->encoder_mask &=
+				~(1 << drm_encoder_index(conn_state->best_encoder));
+		}
+	}
+
+	if (encoder) {
+		crtc = conn_state->crtc;
+		WARN_ON(!crtc);
+		if (crtc) {
+			crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+
+			crtc_state->encoder_mask |=
+				1 << drm_encoder_index(encoder);
+		}
+	}
+
+	conn_state->best_encoder = encoder;
+}
+
+static void
+steal_encoder(struct drm_atomic_state *state,
+	      struct drm_encoder *encoder)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		struct drm_crtc *encoder_crtc;
+
+		if (connector_state->best_encoder != encoder)
+			continue;
+
+		encoder_crtc = connector->state->crtc;
+
+		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
+				 encoder->base.id, encoder->name,
+				 encoder_crtc->base.id, encoder_crtc->name);
+
+		set_best_encoder(state, connector_state, NULL);
+
+		crtc_state = drm_atomic_get_existing_crtc_state(state, encoder_crtc);
+		crtc_state->connectors_changed = true;
+
+		return;
+	}
+}
+
 static int
-update_connector_routing(struct drm_atomic_state *state, int conn_idx)
+update_connector_routing(struct drm_atomic_state *state,
+			 struct drm_connector *connector,
+			 struct drm_connector_state *connector_state)
 {
 	const struct drm_connector_helper_funcs *funcs;
 	struct drm_encoder *new_encoder;
-	struct drm_crtc *encoder_crtc;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
 	struct drm_crtc_state *crtc_state;
-	int idx, ret;
-
-	connector = state->connectors[conn_idx];
-	connector_state = state->connector_states[conn_idx];
-
-	if (!connector)
-		return 0;
 
 	DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
 			 connector->base.id,
@@ -197,16 +272,12 @@
 
 	if (connector->state->crtc != connector_state->crtc) {
 		if (connector->state->crtc) {
-			idx = drm_crtc_index(connector->state->crtc);
-
-			crtc_state = state->crtc_states[idx];
+			crtc_state = drm_atomic_get_existing_crtc_state(state, connector->state->crtc);
 			crtc_state->connectors_changed = true;
 		}
 
 		if (connector_state->crtc) {
-			idx = drm_crtc_index(connector_state->crtc);
-
-			crtc_state = state->crtc_states[idx];
+			crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
 			crtc_state->connectors_changed = true;
 		}
 	}
@@ -216,7 +287,7 @@
 				connector->base.id,
 				connector->name);
 
-		connector_state->best_encoder = NULL;
+		set_best_encoder(state, connector_state, NULL);
 
 		return 0;
 	}
@@ -245,6 +316,8 @@
 	}
 
 	if (new_encoder == connector_state->best_encoder) {
+		set_best_encoder(state, connector_state, new_encoder);
+
 		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
 				 connector->base.id,
 				 connector->name,
@@ -256,33 +329,11 @@
 		return 0;
 	}
 
-	if (!check_pending_encoder_assignment(state, new_encoder)) {
-		DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
-				 connector->base.id,
-				 connector->name);
-		return -EINVAL;
-	}
+	steal_encoder(state, new_encoder);
 
-	encoder_crtc = get_current_crtc_for_encoder(state->dev,
-						    new_encoder);
+	set_best_encoder(state, connector_state, new_encoder);
 
-	if (encoder_crtc) {
-		ret = steal_encoder(state, new_encoder, encoder_crtc);
-		if (ret) {
-			DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
-					 connector->base.id,
-					 connector->name);
-			return ret;
-		}
-	}
-
-	if (WARN_ON(!connector_state->crtc))
-		return -EINVAL;
-
-	connector_state->best_encoder = new_encoder;
-	idx = drm_crtc_index(connector_state->crtc);
-
-	crtc_state = state->crtc_states[idx];
+	crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
 	crtc_state->connectors_changed = true;
 
 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
@@ -445,13 +496,18 @@
 		}
 	}
 
+	ret = handle_conflicting_encoders(state, state->legacy_set_config);
+	if (ret)
+		return ret;
+
 	for_each_connector_in_state(state, connector, connector_state, i) {
 		/*
 		 * This only sets crtc->mode_changed for routing changes,
 		 * drivers must set crtc->mode_changed themselves when connector
 		 * properties need to be updated.
 		 */
-		ret = update_connector_routing(state, i);
+		ret = update_connector_routing(state, connector,
+					       connector_state);
 		if (ret)
 			return ret;
 	}
@@ -617,7 +673,6 @@
 	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
 		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
-		struct drm_crtc_state *old_crtc_state;
 
 		/* Shut down everything that's in the changeset and currently
 		 * still on. So need to check the old, saved state. */
@@ -1719,28 +1774,18 @@
 	struct drm_crtc_state *crtc_state;
 	struct drm_connector *connector;
 	struct drm_connector_state *conn_state;
-	int ret, i, j;
+	int ret, i;
 
 	ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
 			       state->acquire_ctx);
 	if (ret)
 		return ret;
 
-	/* First grab all affected connector/crtc states. */
-	for (i = 0; i < set->num_connectors; i++) {
-		conn_state = drm_atomic_get_connector_state(state,
-							    set->connectors[i]);
-		if (IS_ERR(conn_state))
-			return PTR_ERR(conn_state);
-	}
+	/* First disable all connectors on the target crtc. */
+	ret = drm_atomic_add_affected_connectors(state, set->crtc);
+	if (ret)
+		return ret;
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		ret = drm_atomic_add_affected_connectors(state, crtc);
-		if (ret)
-			return ret;
-	}
-
-	/* Then recompute connector->crtc links and crtc enabling state. */
 	for_each_connector_in_state(state, connector, conn_state, i) {
 		if (conn_state->crtc == set->crtc) {
 			ret = drm_atomic_set_crtc_for_connector(conn_state,
@@ -1748,16 +1793,19 @@
 			if (ret)
 				return ret;
 		}
+	}
 
-		for (j = 0; j < set->num_connectors; j++) {
-			if (set->connectors[j] == connector) {
-				ret = drm_atomic_set_crtc_for_connector(conn_state,
-									set->crtc);
-				if (ret)
-					return ret;
-				break;
-			}
-		}
+	/* Then set all connectors from set->connectors on the target crtc */
+	for (i = 0; i < set->num_connectors; i++) {
+		conn_state = drm_atomic_get_connector_state(state,
+							    set->connectors[i]);
+		if (IS_ERR(conn_state))
+			return PTR_ERR(conn_state);
+
+		ret = drm_atomic_set_crtc_for_connector(conn_state,
+							set->crtc);
+		if (ret)
+			return ret;
 	}
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
@@ -1800,6 +1848,7 @@
 	if (!state)
 		return -ENOMEM;
 
+	state->legacy_set_config = true;
 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 retry:
 	ret = __drm_atomic_helper_set_config(set, state);
@@ -2446,8 +2495,12 @@
  */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
 {
-	if (crtc->state)
+	if (crtc->state) {
 		drm_property_unreference_blob(crtc->state->mode_blob);
+		drm_property_unreference_blob(crtc->state->degamma_lut);
+		drm_property_unreference_blob(crtc->state->ctm);
+		drm_property_unreference_blob(crtc->state->gamma_lut);
+	}
 	kfree(crtc->state);
 	crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
 
@@ -2471,10 +2524,17 @@
 
 	if (state->mode_blob)
 		drm_property_reference_blob(state->mode_blob);
+	if (state->degamma_lut)
+		drm_property_reference_blob(state->degamma_lut);
+	if (state->ctm)
+		drm_property_reference_blob(state->ctm);
+	if (state->gamma_lut)
+		drm_property_reference_blob(state->gamma_lut);
 	state->mode_changed = false;
 	state->active_changed = false;
 	state->planes_changed = false;
 	state->connectors_changed = false;
+	state->color_mgmt_changed = false;
 	state->event = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
@@ -2515,6 +2575,9 @@
 					    struct drm_crtc_state *state)
 {
 	drm_property_unreference_blob(state->mode_blob);
+	drm_property_unreference_blob(state->degamma_lut);
+	drm_property_unreference_blob(state->ctm);
+	drm_property_unreference_blob(state->gamma_lut);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
 
@@ -2549,8 +2612,10 @@
 	kfree(plane->state);
 	plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
 
-	if (plane->state)
+	if (plane->state) {
 		plane->state->plane = plane;
+		plane->state->rotation = BIT(DRM_ROTATE_0);
+	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
 
@@ -2826,3 +2891,98 @@
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+
+/**
+ * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @start: first entry to update (ignored here, the full table is written)
+ * @size: size of the tables
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that support color management through the DEGAMMA_LUT/GAMMA_LUT
+ * properties.
+ */
+void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+					u16 *red, u16 *green, u16 *blue,
+					uint32_t start, uint32_t size)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct drm_property_blob *blob = NULL;
+	struct drm_color_lut *blob_data;
+	int i, ret = 0;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state)
+		return;
+
+	blob = drm_property_create_blob(dev,
+					sizeof(struct drm_color_lut) * size,
+					NULL);
+	if (IS_ERR(blob)) {
+		ret = PTR_ERR(blob);
+		blob = NULL;
+		goto fail;
+	}
+
+	/* Prepare GAMMA_LUT with the legacy values. */
+	blob_data = (struct drm_color_lut *) blob->data;
+	for (i = 0; i < size; i++) {
+		blob_data[i].red = red[i];
+		blob_data[i].green = green[i];
+		blob_data[i].blue = blue[i];
+	}
+
+	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+retry:
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	/* Reset DEGAMMA_LUT and CTM properties. */
+	ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+			config->degamma_lut_property, 0);
+	if (ret)
+		goto fail;
+
+	ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+			config->ctm_property, 0);
+	if (ret)
+		goto fail;
+
+	ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+			config->gamma_lut_property, blob->base.id);
+	if (ret)
+		goto fail;
+
+	ret = drm_atomic_commit(state);
+	if (ret)
+		goto fail;
+
+	/* Driver takes ownership of state on successful commit. */
+
+	drm_property_unreference_blob(blob);
+
+	return;
+fail:
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	drm_atomic_state_free(state);
+	drm_property_unreference_blob(blob);
+
+	return;
+backoff:
+	drm_atomic_state_clear(state);
+	drm_atomic_legacy_backoff(state);
+
+	goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
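handle_conflicting_encoders() above replaces the old per-connector stealing
logic with a single bitmask pass: every encoder has a stable index via
drm_encoder_index(), and a second claim on the same bit is rejected up front.
A standalone sketch of that check (the claims array is invented sample data):

    #include <stdio.h>

    int main(void)
    {
            unsigned int encoder_mask = 0;
            unsigned int claims[] = { 0, 2, 2 }; /* connector -> encoder index */
            unsigned int i;

            for (i = 0; i < sizeof(claims) / sizeof(claims[0]); i++) {
                    unsigned int bit = 1u << claims[i];

                    if (encoder_mask & bit) {
                            printf("encoder %u already assigned -> -EINVAL\n",
                                   claims[i]);
                            return 1;
                    }
                    encoder_mask |= bit;
            }
            printf("no conflicts\n");
            return 0;
    }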
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index bd93453..b365440 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -186,7 +186,8 @@
 
 	drm_bridge_disable(bridge->next);
 
-	bridge->funcs->disable(bridge);
+	if (bridge->funcs->disable)
+		bridge->funcs->disable(bridge);
 }
 EXPORT_SYMBOL(drm_bridge_disable);
 
@@ -206,7 +207,8 @@
 	if (!bridge)
 		return;
 
-	bridge->funcs->post_disable(bridge);
+	if (bridge->funcs->post_disable)
+		bridge->funcs->post_disable(bridge);
 
 	drm_bridge_post_disable(bridge->next);
 }
@@ -256,7 +258,8 @@
 
 	drm_bridge_pre_enable(bridge->next);
 
-	bridge->funcs->pre_enable(bridge);
+	if (bridge->funcs->pre_enable)
+		bridge->funcs->pre_enable(bridge);
 }
 EXPORT_SYMBOL(drm_bridge_pre_enable);
 
@@ -276,7 +279,8 @@
 	if (!bridge)
 		return;
 
-	bridge->funcs->enable(bridge);
+	if (bridge->funcs->enable)
+		bridge->funcs->enable(bridge);
 
 	drm_bridge_enable(bridge->next);
 }
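All four drm_bridge.c hunks apply the same rule: bridge callbacks become
optional, so every call site gains a NULL check. A tiny sketch of the pattern
(struct and function names here are invented for illustration):

    #include <stdio.h>

    struct bridge_funcs {
            void (*disable)(void);          /* may legitimately be NULL */
    };

    static void bridge_disable(const struct bridge_funcs *funcs)
    {
            if (funcs->disable)
                    funcs->disable();       /* skip silently when unset */
    }

    int main(void)
    {
            struct bridge_funcs none = { .disable = NULL };

            bridge_disable(&none);          /* safe: hook is optional */
            printf("ok\n");
            return 0;
    }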
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f619121..e08f962 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -430,9 +430,7 @@
 static void __drm_framebuffer_unregister(struct drm_device *dev,
 					 struct drm_framebuffer *fb)
 {
-	mutex_lock(&dev->mode_config.idr_mutex);
-	idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
-	mutex_unlock(&dev->mode_config.idr_mutex);
+	drm_mode_object_put(dev, &fb->base);
 
 	fb->base.id = 0;
 }
@@ -1150,6 +1148,29 @@
 EXPORT_SYMBOL(drm_encoder_init);
 
 /**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+	unsigned int index = 0;
+	struct drm_encoder *tmp;
+
+	drm_for_each_encoder(tmp, encoder->dev) {
+		if (tmp == encoder)
+			return index;
+
+		index++;
+	}
+
+	BUG();
+}
+EXPORT_SYMBOL(drm_encoder_index);
+
+/**
  * drm_encoder_cleanup - cleans up an initialised encoder
  * @encoder: encoder to cleanup
  *
@@ -1531,6 +1552,41 @@
 		return -ENOMEM;
 	dev->mode_config.prop_mode_id = prop;
 
+	prop = drm_property_create(dev,
+			DRM_MODE_PROP_BLOB,
+			"DEGAMMA_LUT", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.degamma_lut_property = prop;
+
+	prop = drm_property_create_range(dev,
+			DRM_MODE_PROP_IMMUTABLE,
+			"DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.degamma_lut_size_property = prop;
+
+	prop = drm_property_create(dev,
+			DRM_MODE_PROP_BLOB,
+			"CTM", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.ctm_property = prop;
+
+	prop = drm_property_create(dev,
+			DRM_MODE_PROP_BLOB,
+			"GAMMA_LUT", 0);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.gamma_lut_property = prop;
+
+	prop = drm_property_create_range(dev,
+			DRM_MODE_PROP_IMMUTABLE,
+			"GAMMA_LUT_SIZE", 0, UINT_MAX);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.gamma_lut_size_property = prop;
+
 	return 0;
 }
 
@@ -5254,7 +5310,6 @@
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
-	unsigned long flags;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -5305,41 +5360,26 @@
 	}
 
 	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		ret = -ENOMEM;
-		spin_lock_irqsave(&dev->event_lock, flags);
-		if (file_priv->event_space < sizeof(e->event)) {
-			spin_unlock_irqrestore(&dev->event_lock, flags);
+		e = kzalloc(sizeof *e, GFP_KERNEL);
+		if (!e) {
+			ret = -ENOMEM;
 			goto out;
 		}
-		file_priv->event_space -= sizeof(e->event);
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-
-		e = kzalloc(sizeof(*e), GFP_KERNEL);
-		if (e == NULL) {
-			spin_lock_irqsave(&dev->event_lock, flags);
-			file_priv->event_space += sizeof(e->event);
-			spin_unlock_irqrestore(&dev->event_lock, flags);
-			goto out;
-		}
-
 		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
 		e->event.base.length = sizeof(e->event);
 		e->event.user_data = page_flip->user_data;
-		e->base.event = &e->event.base;
-		e->base.file_priv = file_priv;
-		e->base.destroy =
-			(void (*) (struct drm_pending_event *)) kfree;
+		ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+		if (ret) {
+			kfree(e);
+			goto out;
+		}
 	}
 
 	crtc->primary->old_fb = crtc->primary->fb;
 	ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
 	if (ret) {
-		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-			spin_lock_irqsave(&dev->event_lock, flags);
-			file_priv->event_space += sizeof(e->event);
-			spin_unlock_irqrestore(&dev->event_lock, flags);
-			kfree(e);
-		}
+		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
+			drm_event_cancel_free(dev, &e->base);
 		/* Keep the old fb, don't unref it. */
 		crtc->primary->old_fb = NULL;
 	} else {
@@ -5720,6 +5760,48 @@
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
 
 /**
+ * drm_format_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_format_plane_width(int width, uint32_t format, int plane)
+{
+	if (plane >= drm_format_num_planes(format))
+		return 0;
+
+	if (plane == 0)
+		return width;
+
+	return width / drm_format_horz_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_width);
+
+/**
+ * drm_format_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+int drm_format_plane_height(int height, uint32_t format, int plane)
+{
+	if (plane >= drm_format_num_planes(format))
+		return 0;
+
+	if (plane == 0)
+		return height;
+
+	return height / drm_format_vert_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_height);
+
+/**
  * drm_rotation_simplify() - Try to simplify the rotation
  * @rotation: Rotation to be simplified
  * @supported_rotations: Supported rotations
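A worked example for the new drm_format_plane_width()/drm_format_plane_height()
helpers: NV12 carries a full-resolution luma plane and a chroma plane
subsampled 2x2, so for a 1920x1080 buffer plane 1 is 960x540. The subsampling
factors are hard-coded below; the kernel derives them from the fourcc instead:

    #include <stdio.h>

    int main(void)
    {
            int width = 1920, height = 1080;
            int horz_sub = 2, vert_sub = 2;     /* NV12 is 4:2:0 */

            printf("plane 0: %dx%d\n", width, height);       /* 1920x1080 */
            printf("plane 1: %dx%d\n", width / horz_sub,
                   height / vert_sub);                       /* 960x540 */
            return 0;
    }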
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a02a7f9..79555d2 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -73,9 +73,6 @@
  * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
  * &drm_connector_helper_funcs.
  */
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
 
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
@@ -220,6 +217,15 @@
  * disconnected connectors. Then it will disable all unused encoders and CRTCs
  * either by calling their disable callback if available or by calling their
  * dpms callback with DRM_MODE_DPMS_OFF.
+ *
+ * NOTE:
+ *
+ * This function is part of the legacy modeset helper library and will cause
+ * major confusion with atomic drivers. This is because atomic helpers guarantee
+ * to never call ->disable() hooks on a disabled function, or ->enable() hooks
+ * on an enabled function. drm_helper_disable_unused_functions() on the other
+ * hand throws such guarantees into the wind and calls disable hooks
+ * unconditionally on unused functions.
  */
 void drm_helper_disable_unused_functions(struct drm_device *dev)
 {
@@ -328,16 +334,21 @@
 		}
 
 		encoder_funcs = encoder->helper_private;
-		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
-						      adjusted_mode))) {
-			DRM_DEBUG_KMS("Encoder fixup failed\n");
-			goto done;
+		if (encoder_funcs->mode_fixup) {
+			if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+							      adjusted_mode))) {
+				DRM_DEBUG_KMS("Encoder fixup failed\n");
+				goto done;
+			}
 		}
 	}
 
-	if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
-		DRM_DEBUG_KMS("CRTC fixup failed\n");
-		goto done;
+	if (crtc_funcs->mode_fixup) {
+		if (!(ret = crtc_funcs->mode_fixup(crtc, mode,
+						adjusted_mode))) {
+			DRM_DEBUG_KMS("CRTC fixup failed\n");
+			goto done;
+		}
 	}
 	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
@@ -578,8 +589,6 @@
 		if (set->crtc->primary->fb == NULL) {
 			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
 			mode_changed = true;
-		} else if (set->fb == NULL) {
-			mode_changed = true;
 		} else if (set->fb->pixel_format !=
 			   set->crtc->primary->fb->pixel_format) {
 			mode_changed = true;
@@ -590,7 +599,7 @@
 	if (set->x != set->crtc->x || set->y != set->crtc->y)
 		fb_changed = true;
 
-	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+	if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
 		DRM_DEBUG_KMS("modes are different, full mode set\n");
 		drm_mode_debug_printmodeline(&set->crtc->mode);
 		drm_mode_debug_printmodeline(set->mode);
@@ -1066,3 +1075,36 @@
 	return drm_plane_helper_commit(plane, plane_state, old_fb);
 }
 EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
+
+/**
+ * drm_helper_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction properties on a
+ * CRTC. This includes the 3 degamma, CSC and gamma properties that userspace
+ * can set, and the 2 size properties that report the LUT sizes to userspace.
+ */
+void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+				       int degamma_lut_size,
+				       int gamma_lut_size)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *config = &dev->mode_config;
+
+	drm_object_attach_property(&crtc->base,
+				   config->degamma_lut_property, 0);
+	drm_object_attach_property(&crtc->base,
+				   config->ctm_property, 0);
+	drm_object_attach_property(&crtc->base,
+				   config->gamma_lut_property, 0);
+
+	drm_object_attach_property(&crtc->base,
+				   config->degamma_lut_size_property,
+				   degamma_lut_size);
+	drm_object_attach_property(&crtc->base,
+				   config->gamma_lut_size_property,
+				   gamma_lut_size);
+}
+EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
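A hedged sketch of how a driver might adopt the new helper from its CRTC init
path; my_crtc_init() and both LUT sizes are invented for illustration, and the
header is assumed to carry the declaration:

    #include <drm/drm_crtc_helper.h>

    #define MY_DEGAMMA_LUT_SIZE 65
    #define MY_GAMMA_LUT_SIZE   257

    static void my_crtc_init(struct drm_crtc *crtc)
    {
            /* Attaches DEGAMMA_LUT, CTM, GAMMA_LUT plus both size props. */
            drm_helper_crtc_enable_color_mgmt(crtc, MY_DEGAMMA_LUT_SIZE,
                                              MY_GAMMA_LUT_SIZE);
    }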
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
new file mode 100644
index 0000000..f73b38b
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rafael Antognolli <rafael.antognolli@intel.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drmP.h>
+
+struct drm_dp_aux_dev {
+	unsigned index;
+	struct drm_dp_aux *aux;
+	struct device *dev;
+	struct kref refcount;
+	atomic_t usecount;
+};
+
+#define DRM_AUX_MINORS	256
+#define AUX_MAX_OFFSET	(1 << 20)
+static DEFINE_IDR(aux_idr);
+static DEFINE_MUTEX(aux_idr_mutex);
+static struct class *drm_dp_aux_dev_class;
+static int drm_dev_major = -1;
+
+static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
+{
+	struct drm_dp_aux_dev *aux_dev = NULL;
+
+	mutex_lock(&aux_idr_mutex);
+	aux_dev = idr_find(&aux_idr, index);
+	if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
+		aux_dev = NULL;
+	mutex_unlock(&aux_idr_mutex);
+
+	return aux_dev;
+}
+
+static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux)
+{
+	struct drm_dp_aux_dev *aux_dev;
+	int index;
+
+	aux_dev = kzalloc(sizeof(*aux_dev), GFP_KERNEL);
+	if (!aux_dev)
+		return ERR_PTR(-ENOMEM);
+	aux_dev->aux = aux;
+	atomic_set(&aux_dev->usecount, 1);
+	kref_init(&aux_dev->refcount);
+
+	mutex_lock(&aux_idr_mutex);
+	index = idr_alloc_cyclic(&aux_idr, aux_dev, 0, DRM_AUX_MINORS,
+				 GFP_KERNEL);
+	mutex_unlock(&aux_idr_mutex);
+	if (index < 0) {
+		kfree(aux_dev);
+		return ERR_PTR(index);
+	}
+	aux_dev->index = index;
+
+	return aux_dev;
+}
+
+static void release_drm_dp_aux_dev(struct kref *ref)
+{
+	struct drm_dp_aux_dev *aux_dev =
+		container_of(ref, struct drm_dp_aux_dev, refcount);
+
+	kfree(aux_dev);
+}
+
+static ssize_t name_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	ssize_t res;
+	struct drm_dp_aux_dev *aux_dev =
+		drm_dp_aux_dev_get_by_minor(MINOR(dev->devt));
+
+	if (!aux_dev)
+		return -ENODEV;
+
+	res = sprintf(buf, "%s\n", aux_dev->aux->name);
+	kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
+
+	return res;
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *drm_dp_aux_attrs[] = {
+	&dev_attr_name.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(drm_dp_aux);
+
+static int auxdev_open(struct inode *inode, struct file *file)
+{
+	unsigned int minor = iminor(inode);
+	struct drm_dp_aux_dev *aux_dev;
+
+	aux_dev = drm_dp_aux_dev_get_by_minor(minor);
+	if (!aux_dev)
+		return -ENODEV;
+
+	file->private_data = aux_dev;
+	return 0;
+}
+
+static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence)
+{
+	return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET);
+}
+
+static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count,
+			   loff_t *offset)
+{
+	size_t bytes_pending, num_bytes_processed = 0;
+	struct drm_dp_aux_dev *aux_dev = file->private_data;
+	ssize_t res = 0;
+
+	if (!atomic_inc_not_zero(&aux_dev->usecount))
+		return -ENODEV;
+
+	bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - (*offset));
+
+	if (!access_ok(VERIFY_WRITE, buf, bytes_pending)) {
+		res = -EFAULT;
+		goto out;
+	}
+
+	while (bytes_pending > 0) {
+		uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
+		ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+
+		res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
+		if (res <= 0) {
+			res = num_bytes_processed ? num_bytes_processed : res;
+			goto out;
+		}
+		if (__copy_to_user(buf + num_bytes_processed, localbuf, res)) {
+			res = num_bytes_processed ?
+				num_bytes_processed : -EFAULT;
+			goto out;
+		}
+		bytes_pending -= res;
+		*offset += res;
+		num_bytes_processed += res;
+		res = num_bytes_processed;
+	}
+
+out:
+	atomic_dec(&aux_dev->usecount);
+	wake_up_atomic_t(&aux_dev->usecount);
+	return res;
+}
+
+static ssize_t auxdev_write(struct file *file, const char __user *buf,
+			    size_t count, loff_t *offset)
+{
+	size_t bytes_pending, num_bytes_processed = 0;
+	struct drm_dp_aux_dev *aux_dev = file->private_data;
+	ssize_t res = 0;
+
+	if (!atomic_inc_not_zero(&aux_dev->usecount))
+		return -ENODEV;
+
+	bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - *offset);
+
+	if (!access_ok(VERIFY_READ, buf, bytes_pending)) {
+		res = -EFAULT;
+		goto out;
+	}
+
+	while (bytes_pending > 0) {
+		uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
+		ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+
+		if (__copy_from_user(localbuf,
+				     buf + num_bytes_processed, todo)) {
+			res = num_bytes_processed ?
+				num_bytes_processed : -EFAULT;
+			goto out;
+		}
+
+		res = drm_dp_dpcd_write(aux_dev->aux, *offset, localbuf, todo);
+		if (res <= 0) {
+			res = num_bytes_processed ? num_bytes_processed : res;
+			goto out;
+		}
+		bytes_pending -= res;
+		*offset += res;
+		num_bytes_processed += res;
+		res = num_bytes_processed;
+	}
+
+out:
+	atomic_dec(&aux_dev->usecount);
+	wake_up_atomic_t(&aux_dev->usecount);
+	return res;
+}
+
+static int auxdev_release(struct inode *inode, struct file *file)
+{
+	struct drm_dp_aux_dev *aux_dev = file->private_data;
+
+	kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
+	return 0;
+}
+
+static const struct file_operations auxdev_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= auxdev_llseek,
+	.read		= auxdev_read,
+	.write		= auxdev_write,
+	.open		= auxdev_open,
+	.release	= auxdev_release,
+};
+
+#define to_auxdev(d) container_of(d, struct drm_dp_aux_dev, aux)
+
+static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
+{
+	struct drm_dp_aux_dev *iter, *aux_dev = NULL;
+	int id;
+
+	/* don't increase kref count here because this function should only be
+	 * used by drm_dp_aux_unregister_devnode. Thus, it will always have at
+	 * least one reference - the one that drm_dp_aux_register_devnode
+	 * created
+	 */
+	mutex_lock(&aux_idr_mutex);
+	idr_for_each_entry(&aux_idr, iter, id) {
+		if (iter->aux == aux) {
+			aux_dev = iter;
+			break;
+		}
+	}
+	mutex_unlock(&aux_idr_mutex);
+	return aux_dev;
+}
+
+static int auxdev_wait_atomic_t(atomic_t *p)
+{
+	schedule();
+	return 0;
+}
+/**
+ * drm_dp_aux_unregister_devnode() - unregister a devnode for this aux channel
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
+{
+	struct drm_dp_aux_dev *aux_dev;
+	unsigned int minor;
+
+	aux_dev = drm_dp_aux_dev_get_by_aux(aux);
+	if (!aux_dev) /* attach must have failed */
+		return;
+
+	mutex_lock(&aux_idr_mutex);
+	idr_remove(&aux_idr, aux_dev->index);
+	mutex_unlock(&aux_idr_mutex);
+
+	atomic_dec(&aux_dev->usecount);
+	wait_on_atomic_t(&aux_dev->usecount, auxdev_wait_atomic_t,
+			 TASK_UNINTERRUPTIBLE);
+
+	minor = aux_dev->index;
+	if (aux_dev->dev)
+		device_destroy(drm_dp_aux_dev_class,
+			       MKDEV(drm_dev_major, minor));
+
+	DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
+	kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
+}
+EXPORT_SYMBOL(drm_dp_aux_unregister_devnode);
+
+/**
+ * drm_dp_aux_register_devnode() - register a devnode for this aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
+{
+	struct drm_dp_aux_dev *aux_dev;
+	int res;
+
+	aux_dev = alloc_drm_dp_aux_dev(aux);
+	if (IS_ERR(aux_dev))
+		return PTR_ERR(aux_dev);
+
+	aux_dev->dev = device_create(drm_dp_aux_dev_class, aux->dev,
+				     MKDEV(drm_dev_major, aux_dev->index), NULL,
+				     "drm_dp_aux%d", aux_dev->index);
+	if (IS_ERR(aux_dev->dev)) {
+		res = PTR_ERR(aux_dev->dev);
+		aux_dev->dev = NULL;
+		goto error;
+	}
+
+	DRM_DEBUG("drm_dp_aux_dev: aux [%s] registered as minor %d\n",
+		  aux->name, aux_dev->index);
+	return 0;
+error:
+	drm_dp_aux_unregister_devnode(aux);
+	return res;
+}
+EXPORT_SYMBOL(drm_dp_aux_register_devnode);
+
+int drm_dp_aux_dev_init(void)
+{
+	int res;
+
+	drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
+	if (IS_ERR(drm_dp_aux_dev_class)) {
+		res = PTR_ERR(drm_dp_aux_dev_class);
+		goto out;
+	}
+	drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
+
+	res = register_chrdev(0, "aux", &auxdev_fops);
+	if (res < 0)
+		goto out;
+	drm_dev_major = res;
+
+	return 0;
+out:
+	class_destroy(drm_dp_aux_dev_class);
+	return res;
+}
+EXPORT_SYMBOL(drm_dp_aux_dev_init);
+
+void drm_dp_aux_dev_exit(void)
+{
+	unregister_chrdev(drm_dev_major, "aux");
+	class_destroy(drm_dp_aux_dev_class);
+}
+EXPORT_SYMBOL(drm_dp_aux_dev_exit);
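The new chardev exposes raw DPCD access to userspace, one node per AUX
channel. A hedged userspace sketch reading the receiver capability field at
DPCD offset 0x000 (the minor number, and hence the device path, depends on
IDR allocation order):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint8_t dpcd[16];
            int fd = open("/dev/drm_dp_aux0", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (pread(fd, dpcd, sizeof(dpcd), 0x000) > 0)
                    printf("DPCD rev %x.%x\n", dpcd[0] >> 4, dpcd[0] & 0xf);
            close(fd);
            return 0;
    }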
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 9535c5b..7d58f59 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/i2c.h>
 #include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_aux_dev.h>
 #include <drm/drmP.h>
 
 /**
@@ -754,6 +755,8 @@
  */
 int drm_dp_aux_register(struct drm_dp_aux *aux)
 {
+	int ret;
+
 	mutex_init(&aux->hw_mutex);
 
 	aux->ddc.algo = &drm_dp_i2c_algo;
@@ -768,7 +771,17 @@
 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
 		sizeof(aux->ddc.name));
 
-	return i2c_add_adapter(&aux->ddc);
+	ret = drm_dp_aux_register_devnode(aux);
+	if (ret)
+		return ret;
+
+	ret = i2c_add_adapter(&aux->ddc);
+	if (ret) {
+		drm_dp_aux_unregister_devnode(aux);
+		return ret;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_dp_aux_register);
 
@@ -778,6 +791,7 @@
  */
 void drm_dp_aux_unregister(struct drm_dp_aux *aux)
 {
+	drm_dp_aux_unregister_devnode(aux);
 	i2c_del_adapter(&aux->ddc);
 }
 EXPORT_SYMBOL(drm_dp_aux_unregister);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 04cb487..414d7f6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -32,6 +32,7 @@
 #include <linux/hdmi.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
+#include <linux/vga_switcheroo.h>
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_displayid.h>
@@ -1395,6 +1396,31 @@
 EXPORT_SYMBOL(drm_get_edid);
 
 /**
+ * drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
+ * @connector: connector we're probing
+ * @adapter: I2C adapter to use for DDC
+ *
+ * Wrapper around drm_get_edid() for laptops with dual GPUs using one set of
+ * outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily
+ * switch DDC to the GPU which is retrieving EDID.
+ *
+ * Return: Pointer to valid EDID or %NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+				     struct i2c_adapter *adapter)
+{
+	struct pci_dev *pdev = connector->dev->pdev;
+	struct edid *edid;
+
+	vga_switcheroo_lock_ddc(pdev);
+	edid = drm_get_edid(connector, adapter);
+	vga_switcheroo_unlock_ddc(pdev);
+
+	return edid;
+}
+EXPORT_SYMBOL(drm_get_edid_switcheroo);
+
+/**
  * drm_edid_duplicate - duplicate an EDID and the extensions
  * @edid: EDID to duplicate
  *
@@ -3282,7 +3308,7 @@
 	u8 *cea;
 	u8 *name;
 	u8 *db;
-	int sad_count = 0;
+	int total_sad_count = 0;
 	int mnl;
 	int dbl;
 
@@ -3296,6 +3322,7 @@
 
 	name = NULL;
 	drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+	/* max: 13 bytes EDID, 16 bytes ELD */
 	for (mnl = 0; name && mnl < 13; mnl++) {
 		if (name[mnl] == 0x0a)
 			break;
@@ -3324,11 +3351,15 @@
 			dbl = cea_db_payload_len(db);
 
 			switch (cea_db_tag(db)) {
+				int sad_count;
+
 			case AUDIO_BLOCK:
 				/* Audio Data Block, contains SADs */
-				sad_count = dbl / 3;
-				if (dbl >= 1)
-					memcpy(eld + 20 + mnl, &db[1], dbl);
+				sad_count = min(dbl / 3, 15 - total_sad_count);
+				if (sad_count >= 1)
+					memcpy(eld + 20 + mnl + total_sad_count * 3,
+					       &db[1], sad_count * 3);
+				total_sad_count += sad_count;
 				break;
 			case SPEAKER_BLOCK:
 				/* Speaker Allocation Data Block */
@@ -3345,13 +3376,13 @@
 			}
 		}
 	}
-	eld[5] |= sad_count << 4;
+	eld[5] |= total_sad_count << 4;
 
 	eld[DRM_ELD_BASELINE_ELD_LEN] =
 		DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
 
 	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
-		      drm_eld_size(eld), sad_count);
+		      drm_eld_size(eld), total_sad_count);
 }
 EXPORT_SYMBOL(drm_edid_to_eld);
 
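The SAD cap in the drm_edid_to_eld() change follows from the ELD layout: byte
5 stores the SAD count in its top four bits, so at most 15 entries fit, and
each 3-byte SAD is appended after the monitor name (up to 13 bytes) starting
at offset 20. A small arithmetic check of those bounds:

    #include <stdio.h>

    int main(void)
    {
            int mnl = 13, total_sad_count = 15; /* worst case from the code */
            unsigned char eld5 = 0;

            eld5 |= total_sad_count << 4;       /* 4-bit field caps at 15 */
            printf("eld[5] = 0x%02x\n", eld5);  /* 0xf0 */
            printf("last SAD ends at byte %d\n",
                   20 + mnl + total_sad_count * 3);  /* 20 + 13 + 45 = 78 */
            return 0;
    }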
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index e862907..4484785 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -140,6 +140,9 @@
 		const struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
 {
+	if (!get_slave_funcs(encoder)->mode_fixup)
+		return true;
+
 	return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
 }
 EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c895b6f..bb88e3d 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -74,7 +74,8 @@
 };
 
 static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
-	const const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+	const struct drm_mode_fb_cmd2 *mode_cmd,
+	struct drm_gem_cma_object **obj,
 	unsigned int num_planes)
 {
 	struct drm_fb_cma *fb_cma;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1e103c4..855108e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -104,21 +104,17 @@
 {
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_connector *connector;
-	int i;
+	int i, ret;
 
 	if (!drm_fbdev_emulation)
 		return 0;
 
 	mutex_lock(&dev->mode_config.mutex);
 	drm_for_each_connector(connector, dev) {
-		struct drm_fb_helper_connector *fb_helper_connector;
+		ret = drm_fb_helper_add_one_connector(fb_helper, connector);
 
-		fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
-		if (!fb_helper_connector)
+		if (ret)
 			goto fail;
-
-		fb_helper_connector->connector = connector;
-		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 	return 0;
@@ -130,7 +126,7 @@
 	fb_helper->connector_count = 0;
 	mutex_unlock(&dev->mode_config.mutex);
 
-	return -ENOMEM;
+	return ret;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
 
@@ -1989,13 +1985,13 @@
 	width = dev->mode_config.max_width;
 	height = dev->mode_config.max_height;
 
-	crtcs = kcalloc(dev->mode_config.num_connector,
+	crtcs = kcalloc(fb_helper->connector_count,
 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
-	modes = kcalloc(dev->mode_config.num_connector,
+	modes = kcalloc(fb_helper->connector_count,
 			sizeof(struct drm_display_mode *), GFP_KERNEL);
-	offsets = kcalloc(dev->mode_config.num_connector,
+	offsets = kcalloc(fb_helper->connector_count,
 			  sizeof(struct drm_fb_offset), GFP_KERNEL);
-	enabled = kcalloc(dev->mode_config.num_connector,
+	enabled = kcalloc(fb_helper->connector_count,
 			  sizeof(bool), GFP_KERNEL);
 	if (!crtcs || !modes || !enabled || !offsets) {
 		DRM_ERROR("Memory allocation failed\n");
@@ -2009,9 +2005,9 @@
 	      fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
 					       offsets,
 					       enabled, width, height))) {
-		memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
-		memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
-		memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
+		memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
+		memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
+		memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));
 
 		if (!drm_target_cloned(fb_helper, modes, offsets,
 				       enabled, width, height) &&
@@ -2091,6 +2087,27 @@
  * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
  * values for the fbdev info structure.
  *
+ * HANG DEBUGGING:
+ *
+ * When you have fbcon support built-in or already loaded, this function will do
+ * a full modeset to set up the fbdev console. Due to locking misdesign in the
+ * VT/fbdev subsystem that entire modeset sequence has to be done while holding
+ * console_lock. Until console_unlock is called, no dmesg lines will be sent
+ * out to consoles, not even the serial console. This means when your driver
+ * crashes,
+ * you will see absolutely nothing else but a system stuck in this function,
+ * with no further output. Any kind of printk() you place within your own driver
+ * or in the drm core modeset code will also never show up.
+ *
+ * Standard debug practice is to run the fbcon setup without taking the
+ * console_lock as a hack, to be able to see backtraces and crashes on the
+ * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel
+ * cmdline option.
+ *
+ * The other option is to just disable fbdev emulation since very likely the
+ * first modeset from userspace will crash in the same way, and is even easier to
+ * debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
+ * kernel cmdline option.
+ *
  * RETURNS:
  * Zero if everything went ok, nonzero otherwise.
  */
@@ -2175,9 +2192,9 @@
  * but the module doesn't depend on any fb console symbols.  At least
  * attempt to load fbcon to avoid leaving the system without a usable console.
  */
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
-static int __init drm_fb_helper_modinit(void)
+int __init drm_fb_helper_modinit(void)
 {
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
 	const char *name = "fbcon";
 	struct module *fbcon;
 
@@ -2187,8 +2204,7 @@
 
 	if (!fbcon)
 		request_module_nowait(name);
+#endif
 	return 0;
 }
-
-module_init(drm_fb_helper_modinit);
-#endif
+EXPORT_SYMBOL(drm_fb_helper_modinit);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 1ea8790..aeef58e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -1,4 +1,4 @@
-/**
+/*
  * \file drm_fops.c
  * File operations for DRM
  *
@@ -44,6 +44,46 @@
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
 
+/**
+ * DOC: file operations
+ *
+ * Drivers must define the file operations structure that forms the DRM
+ * userspace API entry point, even though most of those operations are
+ * implemented in the DRM core. The mandatory functions are drm_open(),
+ * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled.
+ * Drivers which implement private ioctls that require 32/64 bit compatibility
+ * support must provided their onw .compat_ioctl() handler that processes
+ * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ *
+ * In addition drm_read() and drm_poll() provide support for DRM events. DRM
+ * events are a generic and extensible means to send asynchronous events to
+ * userspace through the file descriptor. They are used to send vblank events and
+ * page flip completions by the KMS API. But drivers can also use them for their
+ * own needs, e.g. to signal completion of rendering.
+ *
+ * The memory mapping implementation will vary depending on how the driver
+ * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
+ * function, while modern drivers should use one of the provided memory-manager
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ *
+ * No other file operations are supported by the DRM userspace API. Overall the
+ * following is an example #file_operations structure:
+ *
+ *     static const struct file_operations example_drm_fops = {
+ *             .owner = THIS_MODULE,
+ *             .open = drm_open,
+ *             .release = drm_release,
+ *             .unlocked_ioctl = drm_ioctl,
+ *     #ifdef CONFIG_COMPAT
+ *             .compat_ioctl = drm_compat_ioctl,
+ *     #endif
+ *             .poll = drm_poll,
+ *             .read = drm_read,
+ *             .llseek = no_llseek,
+ *             .mmap = drm_gem_mmap,
+ *     };
+ */
+
 static int drm_open_helper(struct file *filp, struct drm_minor *minor);
 
 static int drm_setup(struct drm_device * dev)
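
To show where such a file_operations table plugs in, here is a minimal sketch of a hypothetical driver declaration; example_driver and its feature flags are illustrative assumptions, the point is only the .fops wiring:

	/* hypothetical driver, for illustration only */
	static struct drm_driver example_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		.fops = &example_drm_fops,
		/* ... the usual GEM and KMS callbacks would follow here ... */
	};
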
@@ -67,15 +107,17 @@
 }
 
 /**
- * Open file.
+ * drm_open - open method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
  *
- * \param inode device inode
- * \param filp file pointer.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .open() #file_operations
+ * method. It looks up the correct DRM device and instantiates all the per-file
+ * resources for it.
  *
- * Searches the DRM device with the same minor number, calls open_helper(), and
- * increments the device open count. If the open count was previous at zero,
- * i.e., it's the first that the device is open, then calls setup().
+ * RETURNS:
+ *
+ * 0 on success or negative errno value on failure.
  */
 int drm_open(struct inode *inode, struct file *filp)
 {
@@ -112,7 +154,7 @@
 }
 EXPORT_SYMBOL(drm_open);
 
-/**
+/*
  * Check whether DRI will run on this CPU.
  *
  * \return non-zero if the DRI will run on this CPU, or zero otherwise.
@@ -125,7 +167,7 @@
 	return 1;
 }
 
-/**
+/*
  * drm_new_set_master - Allocate a new master object and become master for the
  * associated master realm.
  *
@@ -179,7 +221,7 @@
 	return ret;
 }
 
-/**
+/*
  * Called whenever a process opens /dev/drm.
  *
  * \param filp file pointer.
@@ -222,6 +264,7 @@
 	INIT_LIST_HEAD(&priv->fbs);
 	mutex_init(&priv->fbs_lock);
 	INIT_LIST_HEAD(&priv->blobs);
+	INIT_LIST_HEAD(&priv->pending_event_list);
 	INIT_LIST_HEAD(&priv->event_list);
 	init_waitqueue_head(&priv->event_wait);
 	priv->event_space = 4096; /* set aside 4k for event buffer */
@@ -311,18 +354,16 @@
 {
 	struct drm_device *dev = file_priv->minor->dev;
 	struct drm_pending_event *e, *et;
-	struct drm_pending_vblank_event *v, *vt;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
-	/* Remove pending flips */
-	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
-		if (v->base.file_priv == file_priv) {
-			list_del(&v->base.link);
-			drm_vblank_put(dev, v->pipe);
-			v->base.destroy(&v->base);
-		}
+	/* Unlink pending events */
+	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+				 pending_link) {
+		list_del(&e->pending_link);
+		e->file_priv = NULL;
+	}
 
 	/* Remove unconsumed events */
 	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
@@ -333,7 +374,7 @@
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-/**
+/*
  * drm_legacy_dev_reinit
  *
  * Reinitializes a legacy/ums drm device in it's lastclose function.
@@ -350,7 +391,7 @@
 	dev->if_version = 0;
 }
 
-/**
+/*
  * Take down the DRM device.
  *
  * \param dev DRM device structure.
@@ -387,16 +428,17 @@
 }
 
 /**
- * Release file.
+ * drm_release - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
  *
- * \param inode device inode
- * \param file_priv DRM file private.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .release() #file_operations
+ * method. It frees any resources associated with the open file, and if this is
+ * the last open file for the DRM device also proceeds to call drm_lastclose().
  *
- * If the hardware lock is held then free it, and take it again for the kernel
- * context since it's necessary to reclaim buffers. Unlink the file private
- * data from its list and free it. Decreases the open count and if it reaches
- * zero calls drm_lastclose().
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
  */
 int drm_release(struct inode *inode, struct file *filp)
 {
@@ -451,7 +493,7 @@
 	if (file_priv->is_master) {
 		struct drm_master *master = file_priv->master;
 
-		/**
+		/*
 		 * Since the master is disappearing, so is the
 		 * possibility to lock.
 		 */
@@ -508,6 +550,32 @@
 }
 EXPORT_SYMBOL(drm_release);
 
+/**
+ * drm_read - read method for DRM file
+ * @filp: file pointer
+ * @buffer: userspace destination pointer for the read
+ * @count: count in bytes to read
+ * @offset: offset to read
+ *
+ * This function must be used by drivers as their .read() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion this
+ * means all modern display drivers must use it.
+ *
+ * @offset is ignored; DRM events are read like a pipe. Therefore drivers must
+ * also set the .llseek() #file_operations entry to no_llseek(). Polling
+ * support is provided by drm_poll().
+ *
+ * This function will only ever read a full event. Therefore userspace must
+ * supply a big enough buffer to fit any event to ensure forward progress. Since
+ * the maximum event space is currently 4K it's recommended to just use that for
+ * safety.
+ *
+ * RETURNS:
+ *
+ * Number of bytes read (always aligned to full events, and can be 0) or a
+ * negative error code on failure.
+ */
 ssize_t drm_read(struct file *filp, char __user *buffer,
 		 size_t count, loff_t *offset)
 {
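
On the userspace side, the contract documented above boils down to a simple drain loop; a minimal sketch, assuming an already-open DRM file descriptor drm_fd and kernel uapi headers installed (the handler body is illustrative):

	#include <unistd.h>
	#include <drm/drm.h>	/* struct drm_event, DRM_EVENT_VBLANK */

	char buf[4096];	/* the full 4k event space recommended above */
	ssize_t len = read(drm_fd, buf, sizeof(buf));
	size_t i = 0;

	/* read() only ever returns whole events, so walk them by length */
	while (len > 0 && i < (size_t)len) {
		struct drm_event *e = (struct drm_event *)&buf[i];

		if (e->type == DRM_EVENT_VBLANK)
			; /* handle the vblank or flip completion */
		i += e->length;
	}
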
@@ -578,6 +646,22 @@
 }
 EXPORT_SYMBOL(drm_read);
 
+/**
+ * drm_poll - poll method for DRM file
+ * @filp: file pointer
+ * @wait: poll waiter table
+ *
+ * This function must be used by drivers as their .poll() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion this
+ * means all modern display drivers must use it.
+ *
+ * See also drm_read().
+ *
+ * RETURNS:
+ *
+ * Mask of POLL flags indicating the current status of the file.
+ */
 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
 {
 	struct drm_file *file_priv = filp->private_data;
@@ -591,3 +675,164 @@
 	return mask;
 }
 EXPORT_SYMBOL(drm_poll);
+
+/**
+ * drm_event_reserve_init_locked - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed-in event for eventual delivery. If the
+ * event doesn't get delivered (because the IOCTL fails later on, before queuing
+ * up anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure, it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * This is the locked version of drm_event_reserve_init() for callers which
+ * already hold dev->event_lock.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init_locked(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  struct drm_pending_event *p,
+				  struct drm_event *e)
+{
+	if (file_priv->event_space < e->length)
+		return -ENOMEM;
+
+	file_priv->event_space -= e->length;
+
+	p->event = e;
+	list_add(&p->pending_link, &file_priv->pending_event_list);
+	p->file_priv = file_priv;
+
+	/* we *could* pass this in as arg, but everyone uses kfree: */
+	p->destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_event_reserve_init_locked);
+
+/**
+ * drm_event_reserve_init - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed-in event for eventual delivery. If the
+ * event doesn't get delivered (because the IOCTL fails later on, before queuing
+ * up anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure, it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * Callers which already hold dev->event_lock should use
+ * drm_event_reserve_init_locked() instead.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   struct drm_pending_event *p,
+			   struct drm_event *e)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_event_reserve_init);
+
+/**
+ * drm_event_cancel_free - free a DRM event and release its space
+ * @dev: DRM device
+ * @p: tracking structure for the pending event
+ *
+ * This function frees the event @p initialized with drm_event_reserve_init()
+ * and releases any allocated space.
+ */
+void drm_event_cancel_free(struct drm_device *dev,
+			   struct drm_pending_event *p)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (p->file_priv) {
+		p->file_priv->event_space += p->event->length;
+		list_del(&p->pending_link);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	p->destroy(p);
+}
+EXPORT_SYMBOL(drm_event_cancel_free);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * dev->event_lock, see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+	assert_spin_locked(&dev->event_lock);
+
+	if (!e->file_priv) {
+		e->destroy(e);
+		return;
+	}
+
+	list_del(&e->pending_link);
+	list_add_tail(&e->link,
+		      &e->file_priv->event_list);
+	wake_up_interruptible(&e->file_priv->event_wait);
+}
+EXPORT_SYMBOL(drm_send_event_locked);
+
+/**
+ * drm_send_event - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. This function acquires dev->event_lock,
+ * see drm_send_event_locked() for callers which already hold this lock.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->event_lock, irqflags);
+	drm_send_event_locked(dev, e);
+	spin_unlock_irqrestore(&dev->event_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_send_event);
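
Taken together, the reserve/send/cancel entry points above give drivers a complete event lifecycle. A minimal sketch under stated assumptions: struct my_pending_event and the driver-private event type value are invented for illustration, not existing kernel structures:

	/* sketch for a driver file that already includes drm/drmP.h */
	struct my_pending_event {
		struct drm_pending_event base;	/* must be the first member */
		struct drm_event event;
	};

	static int my_queue_event(struct drm_device *dev,
				  struct drm_file *file_priv)
	{
		struct my_pending_event *p;
		int ret;

		p = kzalloc(sizeof(*p), GFP_KERNEL);	/* must be kmalloc'ed */
		if (!p)
			return -ENOMEM;

		p->event.type = 0x80000000;	/* hypothetical private type */
		p->event.length = sizeof(p->event);

		ret = drm_event_reserve_init(dev, file_priv, &p->base,
					     &p->event);
		if (ret) {
			kfree(p);
			return ret;
		}

		/*
		 * If a later step fails before the event is armed, undo with
		 * drm_event_cancel_free(dev, &p->base). Otherwise, once the
		 * asynchronous work completes:
		 */
		drm_send_event(dev, &p->base);
		return 0;
	}
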
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 1fe1457..881c5a6 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1041,15 +1041,12 @@
 		struct drm_pending_vblank_event *e,
 		unsigned long seq, struct timeval *now)
 {
-	assert_spin_locked(&dev->event_lock);
-
 	e->event.sequence = seq;
 	e->event.tv_sec = now->tv_sec;
 	e->event.tv_usec = now->tv_usec;
 
-	list_add_tail(&e->base.link,
-		      &e->base.file_priv->event_list);
-	wake_up_interruptible(&e->base.file_priv->event_wait);
+	drm_send_event_locked(dev, &e->base);
+
 	trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
 					 e->event.sequence);
 }
@@ -1668,9 +1665,6 @@
 	e->event.base.type = DRM_EVENT_VBLANK;
 	e->event.base.length = sizeof(e->event);
 	e->event.user_data = vblwait->request.signal;
-	e->base.event = &e->event.base;
-	e->base.file_priv = file_priv;
-	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -1686,12 +1680,12 @@
 		goto err_unlock;
 	}
 
-	if (file_priv->event_space < sizeof(e->event)) {
-		ret = -EBUSY;
-		goto err_unlock;
-	}
+	ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
+					    &e->event.base);
 
-	file_priv->event_space -= sizeof(e->event);
+	if (ret)
+		goto err_unlock;
+
 	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
 	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
new file mode 100644
index 0000000..3187c4b
--- /dev/null
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rafael Antognolli <rafael.antognolli@intel.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_aux_dev.h>
+
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
+static int __init drm_kms_helper_init(void)
+{
+	int ret;
+
+	/* Call init functions from specific kms helpers here */
+	ret = drm_fb_helper_modinit();
+	if (ret < 0)
+		goto out;
+
+	ret = drm_dp_aux_dev_init();
+	if (ret < 0)
+		goto out;
+
+out:
+	return ret;
+}
+
+static void __exit drm_kms_helper_exit(void)
+{
+	/* Call exit functions from specific kms helpers here */
+	drm_dp_aux_dev_exit();
+}
+
+module_init(drm_kms_helper_init);
+module_exit(drm_kms_helper_exit);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 6e6a9c5..f5d8083 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -47,7 +47,17 @@
 
 static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
 {
-	return of_driver_match_device(dev, drv);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+	/* attempt OF style match */
+	if (of_driver_match_device(dev, drv))
+		return 1;
+
+	/* compare DSI device and driver names */
+	if (!strcmp(dsi->name, drv->name))
+		return 1;
+
+	return 0;
 }
 
 static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
@@ -129,14 +139,20 @@
 	return device_add(&dsi->dev);
 }
 
+#if IS_ENABLED(CONFIG_OF)
 static struct mipi_dsi_device *
 of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
 {
-	struct mipi_dsi_device *dsi;
 	struct device *dev = host->dev;
+	struct mipi_dsi_device_info info = { };
 	int ret;
 	u32 reg;
 
+	if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+		dev_err(dev, "modalias failure on %s\n", node->full_name);
+		return ERR_PTR(-EINVAL);
+	}
+
 	ret = of_property_read_u32(node, "reg", &reg);
 	if (ret) {
 		dev_err(dev, "device node %s has no valid reg property: %d\n",
@@ -144,32 +160,111 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (reg > 3) {
-		dev_err(dev, "device node %s has invalid reg property: %u\n",
-			node->full_name, reg);
+	info.channel = reg;
+	info.node = of_node_get(node);
+
+	return mipi_dsi_device_register_full(host, &info);
+}
+#else
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
+
+/**
+ * mipi_dsi_device_register_full - create a MIPI DSI device
+ * @host: DSI host to which this device is connected
+ * @info: pointer to template containing DSI device information
+ *
+ * Create a MIPI DSI device by using the device information provided in the
+ * mipi_dsi_device_info template.
+ *
+ * Returns:
+ * A pointer to the newly created MIPI DSI device, or an ERR_PTR() encoded
+ * error on failure.
+ */
+struct mipi_dsi_device *
+mipi_dsi_device_register_full(struct mipi_dsi_host *host,
+			      const struct mipi_dsi_device_info *info)
+{
+	struct mipi_dsi_device *dsi;
+	struct device *dev = host->dev;
+	int ret;
+
+	if (!info) {
+		dev_err(dev, "invalid mipi_dsi_device_info pointer\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (info->channel > 3) {
+		dev_err(dev, "invalid virtual channel: %u\n", info->channel);
 		return ERR_PTR(-EINVAL);
 	}
 
 	dsi = mipi_dsi_device_alloc(host);
 	if (IS_ERR(dsi)) {
-		dev_err(dev, "failed to allocate DSI device %s: %ld\n",
-			node->full_name, PTR_ERR(dsi));
+		dev_err(dev, "failed to allocate DSI device %ld\n",
+			PTR_ERR(dsi));
 		return dsi;
 	}
 
-	dsi->dev.of_node = of_node_get(node);
-	dsi->channel = reg;
+	dsi->dev.of_node = info->node;
+	dsi->channel = info->channel;
+	strlcpy(dsi->name, info->type, sizeof(dsi->name));
 
 	ret = mipi_dsi_device_add(dsi);
 	if (ret) {
-		dev_err(dev, "failed to add DSI device %s: %d\n",
-			node->full_name, ret);
+		dev_err(dev, "failed to add DSI device %d\n", ret);
 		kfree(dsi);
 		return ERR_PTR(ret);
 	}
 
 	return dsi;
 }
+EXPORT_SYMBOL(mipi_dsi_device_register_full);
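
A minimal caller-side sketch for the non-OF path, assuming a valid host and an invented peripheral name "example-panel":

	struct mipi_dsi_device_info info = {
		.type = "example-panel",	/* assumed device name */
		.channel = 0,			/* virtual channel, 0..3 */
		.node = NULL,			/* no DT node in this sketch */
	};
	struct mipi_dsi_device *dsi;

	dsi = mipi_dsi_device_register_full(host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	/* ... on teardown: */
	mipi_dsi_device_unregister(dsi);
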
+
+/**
+ * mipi_dsi_device_unregister - unregister MIPI DSI device
+ * @dsi: DSI peripheral device
+ */
+void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi)
+{
+	device_unregister(&dsi->dev);
+}
+EXPORT_SYMBOL(mipi_dsi_device_unregister);
+
+static DEFINE_MUTEX(host_lock);
+static LIST_HEAD(host_list);
+
+/**
+ * of_find_mipi_dsi_host_by_node() - find the MIPI DSI host matching a
+ *				     device tree node
+ * @node: device tree node
+ *
+ * Returns:
+ * A pointer to the MIPI DSI host corresponding to @node or NULL if no
+ * such device exists (or has not been registered yet).
+ */
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
+{
+	struct mipi_dsi_host *host;
+
+	mutex_lock(&host_lock);
+
+	list_for_each_entry(host, &host_list, list) {
+		if (host->dev->of_node == node) {
+			mutex_unlock(&host_lock);
+			return host;
+		}
+	}
+
+	mutex_unlock(&host_lock);
+
+	return NULL;
+}
+EXPORT_SYMBOL(of_find_mipi_dsi_host_by_node);
 
 int mipi_dsi_host_register(struct mipi_dsi_host *host)
 {
@@ -182,6 +277,10 @@
 		of_mipi_dsi_device_add(host, node);
 	}
 
+	mutex_lock(&host_lock);
+	list_add_tail(&host->list, &host_list);
+	mutex_unlock(&host_lock);
+
 	return 0;
 }
 EXPORT_SYMBOL(mipi_dsi_host_register);
@@ -190,7 +289,7 @@
 {
 	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
 
-	device_unregister(&dsi->dev);
+	mipi_dsi_device_unregister(dsi);
 
 	return 0;
 }
@@ -198,6 +297,10 @@
 void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
 {
 	device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+
+	mutex_lock(&host_lock);
+	list_del_init(&host->list);
+	mutex_unlock(&host_lock);
 }
 EXPORT_SYMBOL(mipi_dsi_host_unregister);
 
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 20775c0..f7448a5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1371,8 +1371,7 @@
 	}
 done:
 	if (i >= 0) {
-		printk(KERN_WARNING
-			"parse error at position %i in video mode '%s'\n",
+		pr_warn("[drm] parse error at position %i in video mode '%s'\n",
 			i, name);
 		mode->specified = false;
 		return false;
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 493c05c..bc98bb9 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -149,3 +149,37 @@
 	return component_master_add_with_match(dev, m_ops, match);
 }
 EXPORT_SYMBOL(drm_of_component_probe);
+
+/*
+ * drm_of_encoder_active_endpoint - return the active encoder endpoint
+ * @node: device tree node containing encoder input ports
+ * @encoder: drm_encoder
+ *
+ * Given an encoder device node and a drm_encoder with a connected crtc,
+ * parse the encoder endpoint connecting to the crtc port.
+ */
+int drm_of_encoder_active_endpoint(struct device_node *node,
+				   struct drm_encoder *encoder,
+				   struct of_endpoint *endpoint)
+{
+	struct device_node *ep;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct device_node *port;
+	int ret;
+
+	if (!node || !crtc)
+		return -EINVAL;
+
+	for_each_endpoint_of_node(node, ep) {
+		port = of_graph_get_remote_port(ep);
+		of_node_put(port);
+		if (port == crtc->port) {
+			ret = of_graph_parse_endpoint(ep, endpoint);
+			of_node_put(ep);
+			return ret;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
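
A hypothetical encoder driver could use this helper as follows to learn which of its input ports feeds the active CRTC (names and error handling are illustrative):

	struct of_endpoint ep;
	int ret;

	ret = drm_of_encoder_active_endpoint(dev->of_node, encoder, &ep);
	if (ret < 0)
		return ret;

	/* ep.port and ep.id now identify the active input endpoint */
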
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 27aa718..df6cdc7 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -329,7 +329,7 @@
  * drm_gem_prime_export - helper library implementation of the export callback
  * @dev: drm_device to export from
  * @obj: GEM object to export
- * @flags: flags like DRM_CLOEXEC
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
  *
  * This is the implementation of the gem_prime_export functions for GEM drivers
  * using the PRIME helpers.
@@ -628,7 +628,6 @@
 				 struct drm_file *file_priv)
 {
 	struct drm_prime_handle *args = data;
-	uint32_t flags;
 
 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
 		return -EINVAL;
@@ -637,14 +636,11 @@
 		return -ENOSYS;
 
 	/* check flags are valid */
-	if (args->flags & ~DRM_CLOEXEC)
+	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
 		return -EINVAL;
 
-	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
-	flags = args->flags & DRM_CLOEXEC;
-
 	return dev->driver->prime_handle_to_fd(dev, file_priv,
-			args->handle, flags, &args->fd);
+			args->handle, args->flags, &args->fd);
 }
 
 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 332c55e..d8d5564 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -21,6 +21,7 @@
 
 #include "common.xml.h"
 #include "state.xml.h"
+#include "state_3d.xml.h"
 #include "cmdstream.xml.h"
 
 /*
@@ -85,10 +86,17 @@
 	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
 }
 
-static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
+static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
 {
-	u32 flush;
-	u32 stall;
+	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
+		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
+		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
+}
+
+static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
+	struct etnaviv_cmdbuf *buffer, u8 pipe)
+{
+	u32 flush = 0;
 
 	/*
 	 * This assumes that if we're switching to 2D, we're switching
@@ -96,17 +104,13 @@
 	 * the 2D core, we need to flush the 3D depth and color caches,
 	 * otherwise we need to flush the 2D pixel engine cache.
 	 */
-	if (pipe == ETNA_PIPE_2D)
-		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
-	else
+	if (gpu->exec_state == ETNA_PIPE_2D)
 		flush = VIVS_GL_FLUSH_CACHE_PE2D;
-
-	stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
-		VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
+	else if (gpu->exec_state == ETNA_PIPE_3D)
+		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
 
 	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
-	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
-
+	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 
 	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
@@ -131,6 +135,36 @@
 			ptr, len * 4, 0);
 }
 
+/*
+ * Safely replace the WAIT of a waitlink with a new command and argument.
+ * The GPU may be executing this WAIT while we're modifying it, so we have
+ * to write it in a specific order to avoid the GPU branching to somewhere
+ * else.  'wl_offset' is the offset to the first byte of the WAIT command.
+ */
+static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
+	unsigned int wl_offset, u32 cmd, u32 arg)
+{
+	u32 *lw = buffer->vaddr + wl_offset;
+
+	lw[1] = arg;
+	mb();
+	lw[0] = cmd;
+	mb();
+}
+
+/*
+ * Ensure that there is space in the command buffer to contiguously write
+ * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
+ */
+static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
+	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
+{
+	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
+		buffer->user_size = 0;
+
+	return gpu_va(gpu, buffer) + buffer->user_size;
+}
+
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_cmdbuf *buffer = gpu->buffer;
@@ -147,81 +181,79 @@
 void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+	unsigned int waitlink_offset = buffer->user_size - 16;
+	u32 link_target, flush = 0;
 
-	/* Replace the last WAIT with an END */
-	buffer->user_size -= 16;
+	if (gpu->exec_state == ETNA_PIPE_2D)
+		flush = VIVS_GL_FLUSH_CACHE_PE2D;
+	else if (gpu->exec_state == ETNA_PIPE_3D)
+		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
+			VIVS_GL_FLUSH_CACHE_COLOR |
+			VIVS_GL_FLUSH_CACHE_TEXTURE |
+			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
+			VIVS_GL_FLUSH_CACHE_SHADER_L2;
 
-	CMD_END(buffer);
-	mb();
+	if (flush) {
+		unsigned int dwords = 7;
+
+		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
+
+		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
+		if (gpu->exec_state == ETNA_PIPE_3D)
+			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+				       VIVS_TS_FLUSH_CACHE_FLUSH);
+		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_END(buffer);
+
+		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+					    VIV_FE_LINK_HEADER_OP_LINK |
+					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
+					    link_target);
+	} else {
+		/* Replace the last link-wait with an "END" command */
+		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+					    VIV_FE_END_HEADER_OP_END, 0);
+	}
 }
 
+/* Append a command buffer to the ring buffer. */
 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	struct etnaviv_cmdbuf *cmdbuf)
 {
 	struct etnaviv_cmdbuf *buffer = gpu->buffer;
-	u32 *lw = buffer->vaddr + buffer->user_size - 16;
-	u32 back, link_target, link_size, reserve_size, extra_size = 0;
+	unsigned int waitlink_offset = buffer->user_size - 16;
+	u32 return_target, return_dwords;
+	u32 link_target, link_dwords;
 
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
 
+	link_target = gpu_va(gpu, cmdbuf);
+	link_dwords = cmdbuf->size / 8;
+
 	/*
-	 * If we need to flush the MMU prior to submitting this buffer, we
-	 * will need to append a mmu flush load state, followed by a new
+	 * If we need maintenance prior to submitting this buffer, we will
+	 * need to append an MMU flush load state, followed by a new
 	 * link to this buffer - a total of four additional words.
 	 */
 	if (gpu->mmu->need_flush || gpu->switch_context) {
+		u32 target, extra_dwords;
+
 		/* link command */
-		extra_size += 2;
+		extra_dwords = 1;
+
 		/* flush command */
 		if (gpu->mmu->need_flush)
-			extra_size += 2;
+			extra_dwords += 1;
+
 		/* pipe switch commands */
 		if (gpu->switch_context)
-			extra_size += 8;
-	}
+			extra_dwords += 4;
 
-	reserve_size = (6 + extra_size) * 4;
-
-	/*
-	 * if we are going to completely overflow the buffer, we need to wrap.
-	 */
-	if (buffer->user_size + reserve_size > buffer->size)
-		buffer->user_size = 0;
-
-	/* save offset back into main buffer */
-	back = buffer->user_size + reserve_size - 6 * 4;
-	link_target = gpu_va(gpu, buffer) + buffer->user_size;
-	link_size = 6;
-
-	/* Skip over any extra instructions */
-	link_target += extra_size * sizeof(u32);
-
-	if (drm_debug & DRM_UT_DRIVER)
-		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
-			link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
-
-	/* jump back from cmd to main buffer */
-	CMD_LINK(cmdbuf, link_size, link_target);
-
-	link_target = gpu_va(gpu, cmdbuf);
-	link_size = cmdbuf->size / 8;
-
-
-
-	if (drm_debug & DRM_UT_DRIVER) {
-		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
-			       cmdbuf->vaddr, cmdbuf->size, 0);
-
-		pr_info("link op: %p\n", lw);
-		pr_info("link addr: %p\n", lw + 1);
-		pr_info("addr: 0x%08x\n", link_target);
-		pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
-		pr_info("event: %d\n", event);
-	}
-
-	if (gpu->mmu->need_flush || gpu->switch_context) {
-		u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
+		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
 
 		if (gpu->mmu->need_flush) {
 			/* Add the MMU flush */
@@ -236,32 +268,59 @@
 		}
 
 		if (gpu->switch_context) {
-			etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
+			etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
+			gpu->exec_state = cmdbuf->exec_state;
 			gpu->switch_context = false;
 		}
 
-		/* And the link to the first buffer */
-		CMD_LINK(buffer, link_size, link_target);
+		/* And the link to the submitted buffer */
+		CMD_LINK(buffer, link_dwords, link_target);
 
 		/* Update the link target to point to above instructions */
-		link_target = new_target;
-		link_size = extra_size;
+		link_target = target;
+		link_dwords = extra_dwords;
 	}
 
-	/* trigger event */
+	/*
+	 * Append a LINK to the submitted command buffer to return to
+	 * the ring buffer.  return_target is the ring target address.
+	 * We need three dwords: event, wait, link.
+	 */
+	return_dwords = 3;
+	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
+	CMD_LINK(cmdbuf, return_dwords, return_target);
+
+	/*
+	 * Append event, wait and link pointing back to the wait
+	 * command to the ring buffer.
+	 */
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
-
-	/* append WAIT/LINK to main buffer */
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
+	CMD_LINK(buffer, 2, return_target + 8);
 
-	/* Change WAIT into a LINK command; write the address first. */
-	*(lw + 1) = link_target;
-	mb();
-	*(lw) = VIV_FE_LINK_HEADER_OP_LINK |
-		VIV_FE_LINK_HEADER_PREFETCH(link_size);
-	mb();
+	if (drm_debug & DRM_UT_DRIVER)
+		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
+			return_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+
+	if (drm_debug & DRM_UT_DRIVER) {
+		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+			       cmdbuf->vaddr, cmdbuf->size, 0);
+
+		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
+		pr_info("addr: 0x%08x\n", link_target);
+		pr_info("back: 0x%08x\n", return_target);
+		pr_info("event: %d\n", event);
+	}
+
+	/*
+	 * Kick off the submitted command by replacing the previous
+	 * WAIT with a link to the address in the ring buffer.
+	 */
+	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+				    VIV_FE_LINK_HEADER_OP_LINK |
+				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
+				    link_target);
 
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 1cd6046..115c5bc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -75,9 +75,6 @@
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
-int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
-	struct drm_gem_object *obj, u32 *iova);
-void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
 void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 97d4457..281c6ec 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -260,8 +260,32 @@
 	return NULL;
 }
 
-int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
-	struct drm_gem_object *obj, u32 *iova)
+void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
+{
+	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+	drm_gem_object_reference(&etnaviv_obj->base);
+
+	mutex_lock(&etnaviv_obj->lock);
+	WARN_ON(mapping->use == 0);
+	mapping->use += 1;
+	mutex_unlock(&etnaviv_obj->lock);
+}
+
+void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
+{
+	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+	mutex_lock(&etnaviv_obj->lock);
+	WARN_ON(mapping->use == 0);
+	mapping->use -= 1;
+	mutex_unlock(&etnaviv_obj->lock);
+
+	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+}
+
+struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct etnaviv_vram_mapping *mapping;
@@ -329,28 +353,12 @@
 out:
 	mutex_unlock(&etnaviv_obj->lock);
 
-	if (!ret) {
-		/* Take a reference on the object */
-		drm_gem_object_reference(obj);
-		*iova = mapping->iova;
-	}
+	if (ret)
+		return ERR_PTR(ret);
 
-	return ret;
-}
-
-void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
-{
-	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-	struct etnaviv_vram_mapping *mapping;
-
-	mutex_lock(&etnaviv_obj->lock);
-	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
-
-	WARN_ON(mapping->use == 0);
-	mapping->use -= 1;
-	mutex_unlock(&etnaviv_obj->lock);
-
-	drm_gem_object_unreference_unlocked(obj);
+	/* Take a reference on the object */
+	drm_gem_object_reference(obj);
+	return mapping;
 }
 
 void *etnaviv_gem_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index ab5df81..02665d8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -88,6 +88,12 @@
 
 #define MAX_CMDS 4
 
+struct etnaviv_gem_submit_bo {
+	u32 flags;
+	struct etnaviv_gem_object *obj;
+	struct etnaviv_vram_mapping *mapping;
+};
+
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
  * associated with the cmdstream submission for synchronization (and
  * make it easier to unwind when things go wrong, etc).  This only
@@ -99,11 +105,7 @@
 	struct ww_acquire_ctx ticket;
 	u32 fence;
 	unsigned int nr_bos;
-	struct {
-		u32 flags;
-		struct etnaviv_gem_object *obj;
-		u32 iova;
-	} bos[0];
+	struct etnaviv_gem_submit_bo bos[0];
 };
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
@@ -115,4 +117,9 @@
 struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
 void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
 
+struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+	struct drm_gem_object *obj, struct etnaviv_gpu *gpu);
+void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping);
+void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
+
 #endif /* __ETNAVIV_GEM_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1aba01a..236ada9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -187,12 +187,10 @@
 	int i;
 
 	for (i = 0; i < submit->nr_bos; i++) {
-		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-
 		if (submit->bos[i].flags & BO_PINNED)
-			etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base);
+			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
 
-		submit->bos[i].iova = 0;
+		submit->bos[i].mapping = NULL;
 		submit->bos[i].flags &= ~BO_PINNED;
 	}
 }
@@ -203,22 +201,24 @@
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-		u32 iova;
+		struct etnaviv_vram_mapping *mapping;
 
-		ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base,
-					   &iova);
-		if (ret)
+		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
+						  submit->gpu);
+		if (IS_ERR(mapping)) {
+			ret = PTR_ERR(mapping);
 			break;
+		}
 
 		submit->bos[i].flags |= BO_PINNED;
-		submit->bos[i].iova = iova;
+		submit->bos[i].mapping = mapping;
 	}
 
 	return ret;
 }
 
 static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
-		struct etnaviv_gem_object **obj, u32 *iova)
+	struct etnaviv_gem_submit_bo **bo)
 {
 	if (idx >= submit->nr_bos) {
 		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -226,10 +226,7 @@
 		return -EINVAL;
 	}
 
-	if (obj)
-		*obj = submit->bos[idx].obj;
-	if (iova)
-		*iova = submit->bos[idx].iova;
+	*bo = &submit->bos[idx];
 
 	return 0;
 }
@@ -245,8 +242,8 @@
 
 	for (i = 0; i < nr_relocs; i++) {
 		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
-		struct etnaviv_gem_object *bobj;
-		u32 iova, off;
+		struct etnaviv_gem_submit_bo *bo;
+		u32 off;
 
 		if (unlikely(r->flags)) {
 			DRM_ERROR("invalid reloc flags\n");
@@ -268,17 +265,16 @@
 			return -EINVAL;
 		}
 
-		ret = submit_bo(submit, r->reloc_idx, &bobj, &iova);
+		ret = submit_bo(submit, r->reloc_idx, &bo);
 		if (ret)
 			return ret;
 
-		if (r->reloc_offset >=
-		    bobj->base.size - sizeof(*ptr)) {
+		if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
 			DRM_ERROR("relocation %u outside object", i);
 			return -EINVAL;
 		}
 
-		ptr[off] = iova + r->reloc_offset;
+		ptr[off] = bo->mapping->iova + r->reloc_offset;
 
 		last_offset = off;
 	}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 3c1ce44..09198d0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -628,6 +628,7 @@
 	/* Now program the hardware */
 	mutex_lock(&gpu->lock);
 	etnaviv_gpu_hw_init(gpu);
+	gpu->exec_state = -1;
 	mutex_unlock(&gpu->lock);
 
 	pm_runtime_mark_last_busy(gpu->dev);
@@ -871,17 +872,13 @@
 		gpu->event[i].fence = NULL;
 		gpu->event[i].used = false;
 		complete(&gpu->event_free);
-		/*
-		 * Decrement the PM count for each stuck event. This is safe
-		 * even in atomic context as we use ASYNC RPM here.
-		 */
-		pm_runtime_put_autosuspend(gpu->dev);
 	}
 	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
 	gpu->completed_fence = gpu->active_fence;
 
 	etnaviv_gpu_hw_init(gpu);
 	gpu->switch_context = true;
+	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
 	pm_runtime_mark_last_busy(gpu->dev);
@@ -1106,7 +1103,7 @@
 	size_t nr_bos)
 {
 	struct etnaviv_cmdbuf *cmdbuf;
-	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
+	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
 				 sizeof(*cmdbuf));
 
 	cmdbuf = kzalloc(sz, GFP_KERNEL);
@@ -1150,14 +1147,23 @@
 		fence_put(cmdbuf->fence);
 
 		for (i = 0; i < cmdbuf->nr_bos; i++) {
-			struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];
+			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
+			struct etnaviv_gem_object *etnaviv_obj = mapping->object;
 
 			atomic_dec(&etnaviv_obj->gpu_active);
 			/* drop the refcount taken in etnaviv_gpu_submit */
-			etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
+			etnaviv_gem_mapping_unreference(mapping);
 		}
 
 		etnaviv_gpu_cmdbuf_free(cmdbuf);
+		/*
+		 * We need to balance the runtime PM count caused by
+		 * each submission.  Upon submission, we increment
+		 * the runtime PM counter, and allocate one event.
+		 * So here, we put the runtime PM count for each
+		 * completed event.
+		 */
+		pm_runtime_put_autosuspend(gpu->dev);
 	}
 
 	gpu->retired_fence = fence;
@@ -1304,11 +1310,10 @@
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-		u32 iova;
 
-		/* Each cmdbuf takes a refcount on the iova */
-		etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
-		cmdbuf->bo[i] = etnaviv_obj;
+		/* Each cmdbuf takes a refcount on the mapping */
+		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
+		cmdbuf->bo_map[i] = submit->bos[i].mapping;
 		atomic_inc(&etnaviv_obj->gpu_active);
 
 		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
@@ -1378,15 +1383,6 @@
 				gpu->completed_fence = fence->seqno;
 
 			event_free(gpu, event);
-
-			/*
-			 * We need to balance the runtime PM count caused by
-			 * each submission.  Upon submission, we increment
-			 * the runtime PM counter, and allocate one event.
-			 * So here, we put the runtime PM count for each
-			 * completed event.
-			 */
-			pm_runtime_put_autosuspend(gpu->dev);
 		}
 
 		/* Retire the buffer objects in a work */
@@ -1481,6 +1477,7 @@
 	etnaviv_gpu_hw_init(gpu);
 
 	gpu->switch_context = true;
+	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
 
@@ -1569,6 +1566,7 @@
 {
 	struct device *dev = &pdev->dev;
 	struct etnaviv_gpu *gpu;
+	u32 dma_mask;
 	int err = 0;
 
 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1579,12 +1577,16 @@
 	mutex_init(&gpu->lock);
 
 	/*
-	 * Set the GPU base address to the start of physical memory.  This
-	 * ensures that if we have up to 2GB, the v1 MMU can address the
-	 * highest memory.  This is important as command buffers may be
-	 * allocated outside of this limit.
+	 * Set the GPU linear window to be at the end of the DMA window, where
+	 * the CMA area is likely to reside. This ensures that we are able to
+	 * map the command buffers while having the linear window overlap as
+	 * much RAM as possible, so we can optimize mappings for other buffers.
 	 */
-	gpu->memory_base = PHYS_OFFSET;
+	dma_mask = (u32)dma_get_required_mask(dev);
+	if (dma_mask < PHYS_OFFSET + SZ_2G)
+		gpu->memory_base = PHYS_OFFSET;
+	else
+		gpu->memory_base = dma_mask - SZ_2G + 1;
 
 	/* Map registers: */
 	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f233ac4..f5321e2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -23,6 +23,7 @@
 #include "etnaviv_drv.h"
 
 struct etnaviv_gem_submit;
+struct etnaviv_vram_mapping;
 
 struct etnaviv_chip_identity {
 	/* Chip model. */
@@ -103,6 +104,7 @@
 
 	/* 'ring'-buffer: */
 	struct etnaviv_cmdbuf *buffer;
+	int exec_state;
 
 	/* bus base address of memory  */
 	u32 memory_base;
@@ -166,7 +168,7 @@
 	struct list_head node;
 	/* BOs attached to this command buffer */
 	unsigned int nr_bos;
-	struct etnaviv_gem_object *bo[0];
+	struct etnaviv_vram_mapping *bo_map[0];
 };
 
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 6743bc6..29a723f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -193,7 +193,7 @@
 
 		/*
 		 * Unmap the blocks which need to be reaped from the MMU.
-		 * Clear the mmu pointer to prevent the get_iova finding
+		 * Clear the mmu pointer to prevent the mapping_get finding
 		 * this mapping.
 		 */
 		list_for_each_entry_safe(m, n, &list, scan_node) {
diff --git a/drivers/gpu/drm/etnaviv/state_3d.xml.h b/drivers/gpu/drm/etnaviv/state_3d.xml.h
new file mode 100644
index 0000000..d7146fd
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_3d.xml.h
@@ -0,0 +1,9 @@
+#ifndef STATE_3D_XML
+#define STATE_3D_XML
+
+/* This is a cut-down version of the state_3d.xml.h file */
+
+#define VIVS_TS_FLUSH_CACHE					0x00001650
+#define VIVS_TS_FLUSH_CACHE_FLUSH				0x00000001
+
+#endif /* STATE_3D_XML */
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 6496532..968b31c 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,7 +2,6 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
 exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
 		exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
 		exynos_drm_plane.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 162ab93..5245bc5 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -429,7 +429,7 @@
 	set_bit(BIT_SUSPENDED, &ctx->flags);
 }
 
-void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
+static void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
 
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 52bda3b..9336107 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -60,7 +60,6 @@
 	wait_queue_head_t		wait_vsync_queue;
 	atomic_t			wait_vsync_event;
 
-	struct exynos_drm_panel_info panel;
 	struct drm_encoder *encoder;
 };
 
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 673164b..cff8dc7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -977,9 +977,7 @@
 		return 0;
 	}
 
-	drm_display_mode_from_videomode(&dp->priv.vm, mode);
-	mode->width_mm = dp->priv.width_mm;
-	mode->height_mm = dp->priv.height_mm;
+	drm_display_mode_from_videomode(&dp->vm, mode);
 	connector->display_info.width_mm = mode->width_mm;
 	connector->display_info.height_mm = mode->height_mm;
 
@@ -1155,13 +1153,6 @@
 	return 0;
 }
 
-static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
-				 const struct drm_display_mode *mode,
-				 struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void exynos_dp_mode_set(struct drm_encoder *encoder,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode)
@@ -1177,7 +1168,6 @@
 }
 
 static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
-	.mode_fixup = exynos_dp_mode_fixup,
 	.mode_set = exynos_dp_mode_set,
 	.enable = exynos_dp_enable,
 	.disable = exynos_dp_disable,
@@ -1249,8 +1239,7 @@
 {
 	int ret;
 
-	ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm,
-			OF_USE_NATIVE_MODE);
+	ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
 	if (ret) {
 		DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 66eec4b..b5c2d8f 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -16,6 +16,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/exynos_drm.h>
+#include <video/videomode.h>
 
 #include "exynos_drm_drv.h"
 
@@ -164,8 +165,7 @@
 	struct phy		*phy;
 	int			dpms_mode;
 	int			hpd_gpio;
-
-	struct exynos_drm_panel_info priv;
+	struct videomode	vm;
 };
 
 /* exynos_dp_reg.c */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 05350ae..75e570f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -128,13 +128,6 @@
 	return 0;
 }
 
-static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void exynos_dpi_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
@@ -162,7 +155,6 @@
 }
 
 static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
-	.mode_fixup = exynos_dpi_mode_fixup,
 	.mode_set = exynos_dpi_mode_set,
 	.enable = exynos_dpi_enable,
 	.disable = exynos_dpi_disable,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 68f0f36..5344940 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -130,6 +130,8 @@
 	exynos_atomic_commit_complete(commit);
 }
 
+static struct device *exynos_drm_get_dma_device(void);
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
@@ -147,6 +149,16 @@
 	dev_set_drvdata(dev->dev, dev);
 	dev->dev_private = (void *)private;
 
+	/* the first real CRTC device is used for all dma mapping operations */
+	private->dma_dev = exynos_drm_get_dma_device();
+	if (!private->dma_dev) {
+		DRM_ERROR("no device found for DMA mapping operations.\n");
+		ret = -ENODEV;
+		goto err_free_private;
+	}
+	DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+		 dev_name(private->dma_dev));
+
 	/*
 	 * create mapping to manage iommu table and set a pointer to iommu
 	 * mapping structure to iommu_mapping of private data.
@@ -340,20 +352,6 @@
 
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_pending_event *e, *et;
-	unsigned long flags;
-
-	if (!file->driver_priv)
-		return;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-	/* Release all events handled by page flip handler but not freed. */
-	list_for_each_entry_safe(e, et, &file->event_list, link) {
-		list_del(&e->link);
-		e->destroy(e);
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
 	kfree(file->driver_priv);
 	file->driver_priv = NULL;
 }
@@ -372,6 +370,8 @@
 static const struct drm_ioctl_desc exynos_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
 			DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
+			DRM_AUTH | DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
 			DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
@@ -495,70 +495,66 @@
 /* forward declaration */
 static struct platform_driver exynos_drm_platform_driver;
 
+struct exynos_drm_driver_info {
+	struct platform_driver *driver;
+	unsigned int flags;
+};
+
+#define DRM_COMPONENT_DRIVER	BIT(0)	/* supports component framework */
+#define DRM_VIRTUAL_DEVICE	BIT(1)	/* create virtual platform device */
+#define DRM_DMA_DEVICE		BIT(2)	/* can be used for dma allocations */
+
+#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
+
 /*
  * Connector drivers should not be placed before associated crtc drivers,
  * because connector requires pipe number of its crtc during initialization.
  */
-static struct platform_driver *const exynos_drm_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_FIMD
-	&fimd_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS5433_DECON
-	&exynos5433_decon_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS7_DECON
-	&decon_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_MIC
-	&mic_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_DP
-	&dp_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_DSI
-	&dsi_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_MIXER
-	&mixer_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_HDMI
-	&hdmi_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_VIDI
-	&vidi_driver,
-#endif
+static struct exynos_drm_driver_info exynos_drm_drivers[] = {
+	{
+		DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
+		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+	}, {
+		DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
+		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+	}, {
+		DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
+		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+	}, {
+		DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
+		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+	}, {
+		DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
+		DRM_COMPONENT_DRIVER
+	}, {
+		DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
+		DRM_COMPONENT_DRIVER
+	}, {
+		DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
+		DRM_COMPONENT_DRIVER
+	}, {
+		DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
+		DRM_COMPONENT_DRIVER
+	}, {
+		DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
+		DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
+	}, {
+		DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
+	}, {
+		DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
+	}, {
+		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
+	}, {
+		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
+	}, {
+		DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
+		DRM_VIRTUAL_DEVICE
+	}, {
+		&exynos_drm_platform_driver,
+		DRM_VIRTUAL_DEVICE
+	}
 };
 
-static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_G2D
-	&g2d_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_FIMC
-	&fimc_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
-	&rotator_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_GSC
-	&gsc_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_IPP
-	&ipp_driver,
-#endif
-	&exynos_drm_platform_driver,
-};
-
-static struct platform_driver *const exynos_drm_drv_with_simple_dev[] = {
-#ifdef CONFIG_DRM_EXYNOS_VIDI
-	&vidi_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_IPP
-	&ipp_driver,
-#endif
-	&exynos_drm_platform_driver,
-};
-#define PDEV_COUNT ARRAY_SIZE(exynos_drm_drv_with_simple_dev)
-
 static int compare_dev(struct device *dev, void *data)
 {
 	return dev == (struct device *)data;
@@ -569,11 +565,15 @@
 	struct component_match *match = NULL;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
-		struct device_driver *drv = &exynos_drm_kms_drivers[i]->driver;
+	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
 		struct device *p = NULL, *d;
 
-		while ((d = bus_find_device(&platform_bus_type, p, drv,
+		if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
+			continue;
+
+		while ((d = bus_find_device(&platform_bus_type, p,
+					    &info->driver->driver,
 					    (void *)platform_bus_type.match))) {
 			put_device(p);
 			component_match_add(dev, &match, compare_dev, d);
@@ -630,91 +630,102 @@
 	},
 };
 
-static struct platform_device *exynos_drm_pdevs[PDEV_COUNT];
+static struct device *exynos_drm_get_dma_device(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+		struct device *dev;
+
+		if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
+			continue;
+
+		while ((dev = bus_find_device(&platform_bus_type, NULL,
+					    &info->driver->driver,
+					    (void *)platform_bus_type.match))) {
+			put_device(dev);
+			return dev;
+		}
+	}
+	return NULL;
+}
 
 static void exynos_drm_unregister_devices(void)
 {
-	int i = PDEV_COUNT;
+	int i;
 
-	while (--i >= 0) {
-		platform_device_unregister(exynos_drm_pdevs[i]);
-		exynos_drm_pdevs[i] = NULL;
+	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
+		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+		struct device *dev;
+
+		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
+			continue;
+
+		while ((dev = bus_find_device(&platform_bus_type, NULL,
+					    &info->driver->driver,
+					    (void *)platform_bus_type.match))) {
+			put_device(dev);
+			platform_device_unregister(to_platform_device(dev));
+		}
 	}
 }
 
 static int exynos_drm_register_devices(void)
 {
+	struct platform_device *pdev;
 	int i;
 
-	for (i = 0; i < PDEV_COUNT; ++i) {
-		struct platform_driver *d = exynos_drm_drv_with_simple_dev[i];
-		struct platform_device *pdev =
-			platform_device_register_simple(d->driver.name, -1,
-							NULL, 0);
+	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
 
-		if (!IS_ERR(pdev)) {
-			exynos_drm_pdevs[i] = pdev;
+		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
 			continue;
-		}
-		while (--i >= 0) {
-			platform_device_unregister(exynos_drm_pdevs[i]);
-			exynos_drm_pdevs[i] = NULL;
-		}
 
-		return PTR_ERR(pdev);
+		pdev = platform_device_register_simple(
+					info->driver->driver.name, -1, NULL, 0);
+		if (IS_ERR(pdev))
+			goto fail;
 	}
 
 	return 0;
+fail:
+	exynos_drm_unregister_devices();
+	return PTR_ERR(pdev);
 }
 
-static void exynos_drm_unregister_drivers(struct platform_driver * const *drv,
-					  int count)
+static void exynos_drm_unregister_drivers(void)
 {
-	while (--count >= 0)
-		platform_driver_unregister(drv[count]);
+	int i;
+
+	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
+		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+
+		if (!info->driver)
+			continue;
+
+		platform_driver_unregister(info->driver);
+	}
 }
 
-static int exynos_drm_register_drivers(struct platform_driver * const *drv,
-				       int count)
+static int exynos_drm_register_drivers(void)
 {
 	int i, ret;
 
-	for (i = 0; i < count; ++i) {
-		ret = platform_driver_register(drv[i]);
-		if (!ret)
+	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+
+		if (!info->driver)
 			continue;
 
-		while (--i >= 0)
-			platform_driver_unregister(drv[i]);
-
-		return ret;
+		ret = platform_driver_register(info->driver);
+		if (ret)
+			goto fail;
 	}
-
 	return 0;
-}
-
-static inline int exynos_drm_register_kms_drivers(void)
-{
-	return exynos_drm_register_drivers(exynos_drm_kms_drivers,
-					ARRAY_SIZE(exynos_drm_kms_drivers));
-}
-
-static inline int exynos_drm_register_non_kms_drivers(void)
-{
-	return exynos_drm_register_drivers(exynos_drm_non_kms_drivers,
-					ARRAY_SIZE(exynos_drm_non_kms_drivers));
-}
-
-static inline void exynos_drm_unregister_kms_drivers(void)
-{
-	exynos_drm_unregister_drivers(exynos_drm_kms_drivers,
-					ARRAY_SIZE(exynos_drm_kms_drivers));
-}
-
-static inline void exynos_drm_unregister_non_kms_drivers(void)
-{
-	exynos_drm_unregister_drivers(exynos_drm_non_kms_drivers,
-					ARRAY_SIZE(exynos_drm_non_kms_drivers));
+fail:
+	exynos_drm_unregister_drivers();
+	return ret;
 }
 
 static int exynos_drm_init(void)
@@ -725,19 +736,12 @@
 	if (ret)
 		return ret;
 
-	ret = exynos_drm_register_kms_drivers();
+	ret = exynos_drm_register_drivers();
 	if (ret)
 		goto err_unregister_pdevs;
 
-	ret = exynos_drm_register_non_kms_drivers();
-	if (ret)
-		goto err_unregister_kms_drivers;
-
 	return 0;
 
-err_unregister_kms_drivers:
-	exynos_drm_unregister_kms_drivers();
-
 err_unregister_pdevs:
 	exynos_drm_unregister_devices();
 
@@ -746,8 +750,7 @@
 
 static void exynos_drm_exit(void)
 {
-	exynos_drm_unregister_non_kms_drivers();
-	exynos_drm_unregister_kms_drivers();
+	exynos_drm_unregister_drivers();
 	exynos_drm_unregister_devices();
 }
 
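The #ifdef-guarded driver lists above collapse into the single exynos_drm_drivers[] table that every register/unregister loop now walks. A minimal sketch of the supporting definitions, which live above this hunk and are assumed here rather than shown:

	/* Sketch, not part of this diff: a driver that is compiled out
	 * leaves a NULL entry, skipped by the "if (!info->driver)" checks.
	 */
	#define DRM_COMPONENT_DRIVER	BIT(0)	/* uses the component framework */
	#define DRM_VIRTUAL_DEVICE	BIT(1)	/* needs a dummy platform device */
	#define DRM_DMA_DEVICE		BIT(2)	/* candidate for to_dma_dev() */

	#define DRV_PTR(drv, cond)	(IS_ENABLED(cond) ? &drv : NULL)

	struct exynos_drm_driver_info {
		struct platform_driver *driver;
		unsigned int flags;
	};
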
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 17b5ded..502f750 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -219,8 +219,10 @@
 	struct drm_crtc *crtc[MAX_CRTC];
 	struct drm_property *plane_zpos_property;
 
+	struct device *dma_dev;
 	unsigned long da_start;
 	unsigned long da_space_size;
+	void *mapping;
 
 	unsigned int pipe;
 
@@ -230,6 +232,13 @@
 	wait_queue_head_t	wait;
 };
 
+static inline struct device *to_dma_dev(struct drm_device *dev)
+{
+	struct exynos_drm_private *priv = dev->dev_private;
+
+	return priv->dma_dev;
+}
+
 /*
  * Exynos drm sub driver structure.
  *
@@ -297,7 +306,6 @@
 extern struct platform_driver dsi_driver;
 extern struct platform_driver mixer_driver;
 extern struct platform_driver hdmi_driver;
-extern struct platform_driver exynos_drm_common_hdmi_driver;
 extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
 extern struct platform_driver fimc_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 26e81d19..63c84a1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
 */
 
+#include <asm/unaligned.h>
+
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_mipi_dsi.h>
@@ -209,12 +211,6 @@
 
 #define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
 
-#define REG_ADDR(dsi, reg_idx)		((dsi)->reg_base + \
-					dsi->driver_data->reg_ofs[(reg_idx)])
-#define DSI_WRITE(dsi, reg_idx, val)	writel((val), \
-					REG_ADDR((dsi), (reg_idx)))
-#define DSI_READ(dsi, reg_idx)		readl(REG_ADDR((dsi), (reg_idx)))
-
 static char *clk_names[5] = { "bus_clk", "sclk_mipi",
 	"phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
 	"sclk_rgb_vclk_to_dsim0" };
@@ -228,12 +224,8 @@
 	struct list_head list;
 	struct completion completed;
 	int result;
-	u8 data_id;
-	u8 data[2];
+	struct mipi_dsi_packet packet;
 	u16 flags;
-
-	const u8 *tx_payload;
-	u16 tx_len;
 	u16 tx_done;
 
 	u8 *rx_payload;
@@ -247,7 +239,7 @@
 #define DSIM_STATE_VIDOUT_AVAILABLE	BIT(3)
 
 struct exynos_dsi_driver_data {
-	unsigned int *reg_ofs;
+	const unsigned int *reg_ofs;
 	unsigned int plltmr_reg;
 	unsigned int has_freqband:1;
 	unsigned int has_clklane_stop:1;
@@ -255,7 +247,7 @@
 	unsigned int max_freq;
 	unsigned int wait_for_reset;
 	unsigned int num_bits_resol;
-	unsigned int *reg_values;
+	const unsigned int *reg_values;
 };
 
 struct exynos_dsi {
@@ -324,7 +316,19 @@
 	DSIM_PHYTIMING2_REG,
 	NUM_REGS
 };
-static unsigned int exynos_reg_ofs[] = {
+
+static inline void exynos_dsi_write(struct exynos_dsi *dsi, enum reg_idx idx,
+				    u32 val)
+{
+	writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static inline u32 exynos_dsi_read(struct exynos_dsi *dsi, enum reg_idx idx)
+{
+	return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static const unsigned int exynos_reg_ofs[] = {
 	[DSIM_STATUS_REG] =  0x00,
 	[DSIM_SWRST_REG] =  0x04,
 	[DSIM_CLKCTRL_REG] =  0x08,
@@ -348,7 +353,7 @@
 	[DSIM_PHYTIMING2_REG] =  0x6c,
 };
 
-static unsigned int exynos5433_reg_ofs[] = {
+static const unsigned int exynos5433_reg_ofs[] = {
 	[DSIM_STATUS_REG] = 0x04,
 	[DSIM_SWRST_REG] = 0x0C,
 	[DSIM_CLKCTRL_REG] = 0x10,
@@ -390,7 +395,7 @@
 	PHYTIMING_HS_TRAIL
 };
 
-static unsigned int reg_values[] = {
+static const unsigned int reg_values[] = {
 	[RESET_TYPE] = DSIM_SWRST,
 	[PLL_TIMER] = 500,
 	[STOP_STATE_CNT] = 0xf,
@@ -408,7 +413,25 @@
 	[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
 };
 
-static unsigned int exynos5433_reg_values[] = {
+static const unsigned int exynos5422_reg_values[] = {
+	[RESET_TYPE] = DSIM_SWRST,
+	[PLL_TIMER] = 500,
+	[STOP_STATE_CNT] = 0xf,
+	[PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
+	[PHYCTRL_VREG_LP] = 0,
+	[PHYCTRL_SLEW_UP] = 0,
+	[PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08),
+	[PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d),
+	[PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
+	[PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30),
+	[PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
+	[PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a),
+	[PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c),
+	[PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11),
+	[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d),
+};
+
+static const unsigned int exynos5433_reg_values[] = {
 	[RESET_TYPE] = DSIM_FUNCRST,
 	[PLL_TIMER] = 22200,
 	[STOP_STATE_CNT] = 0xa,
@@ -426,7 +449,7 @@
 	[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
 };
 
-static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
 	.reg_ofs = exynos_reg_ofs,
 	.plltmr_reg = 0x50,
 	.has_freqband = 1,
@@ -438,7 +461,7 @@
 	.reg_values = reg_values,
 };
 
-static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
 	.reg_ofs = exynos_reg_ofs,
 	.plltmr_reg = 0x50,
 	.has_freqband = 1,
@@ -450,7 +473,7 @@
 	.reg_values = reg_values,
 };
 
-static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
 	.reg_ofs = exynos_reg_ofs,
 	.plltmr_reg = 0x58,
 	.has_clklane_stop = 1,
@@ -461,7 +484,7 @@
 	.reg_values = reg_values,
 };
 
-static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
 	.reg_ofs = exynos_reg_ofs,
 	.plltmr_reg = 0x58,
 	.num_clks = 2,
@@ -471,7 +494,7 @@
 	.reg_values = reg_values,
 };
 
-static struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
 	.reg_ofs = exynos5433_reg_ofs,
 	.plltmr_reg = 0xa0,
 	.has_clklane_stop = 1,
@@ -482,7 +505,18 @@
 	.reg_values = exynos5433_reg_values,
 };
 
-static struct of_device_id exynos_dsi_of_match[] = {
+static const struct exynos_dsi_driver_data exynos5422_dsi_driver_data = {
+	.reg_ofs = exynos5433_reg_ofs,
+	.plltmr_reg = 0xa0,
+	.has_clklane_stop = 1,
+	.num_clks = 2,
+	.max_freq = 1500,
+	.wait_for_reset = 1,
+	.num_bits_resol = 12,
+	.reg_values = exynos5422_reg_values,
+};
+
+static const struct of_device_id exynos_dsi_of_match[] = {
 	{ .compatible = "samsung,exynos3250-mipi-dsi",
 	  .data = &exynos3_dsi_driver_data },
 	{ .compatible = "samsung,exynos4210-mipi-dsi",
@@ -491,6 +525,8 @@
 	  .data = &exynos4415_dsi_driver_data },
 	{ .compatible = "samsung,exynos5410-mipi-dsi",
 	  .data = &exynos5_dsi_driver_data },
+	{ .compatible = "samsung,exynos5422-mipi-dsi",
+	  .data = &exynos5422_dsi_driver_data },
 	{ .compatible = "samsung,exynos5433-mipi-dsi",
 	  .data = &exynos5433_dsi_driver_data },
 	{ }
@@ -515,10 +551,10 @@
 
 static void exynos_dsi_reset(struct exynos_dsi *dsi)
 {
-	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+	u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];
 
 	reinit_completion(&dsi->completed);
-	DSI_WRITE(dsi, DSIM_SWRST_REG, driver_data->reg_values[RESET_TYPE]);
+	exynos_dsi_write(dsi, DSIM_SWRST_REG, reset_val);
 }
 
 #ifndef MHZ
@@ -621,7 +657,7 @@
 		reg |= DSIM_FREQ_BAND(band);
 	}
 
-	DSI_WRITE(dsi, DSIM_PLLCTRL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
 
 	timeout = 1000;
 	do {
@@ -629,7 +665,7 @@
 			dev_err(dsi->dev, "PLL failed to stabilize\n");
 			return 0;
 		}
-		reg = DSI_READ(dsi, DSIM_STATUS_REG);
+		reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
 	} while ((reg & DSIM_PLL_STABLE) == 0);
 
 	return fout;
@@ -659,7 +695,7 @@
 	dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
 		hs_clk, byte_clk, esc_clk);
 
-	reg = DSI_READ(dsi, DSIM_CLKCTRL_REG);
+	reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
 	reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
 			| DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
 			| DSIM_BYTE_CLK_SRC_MASK);
@@ -669,7 +705,7 @@
 			| DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
 			| DSIM_BYTE_CLK_SRC(0)
 			| DSIM_TX_REQUEST_HSCLK;
-	DSI_WRITE(dsi, DSIM_CLKCTRL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
 
 	return 0;
 }
@@ -677,7 +713,7 @@
 static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
 {
 	struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
-	unsigned int *reg_values = driver_data->reg_values;
+	const unsigned int *reg_values = driver_data->reg_values;
 	u32 reg;
 
 	if (driver_data->has_freqband)
@@ -686,7 +722,7 @@
 	/* B D-PHY: D-PHY Master & Slave Analog Block control */
 	reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
 		reg_values[PHYCTRL_SLEW_UP];
-	DSI_WRITE(dsi, DSIM_PHYCTRL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_PHYCTRL_REG, reg);
 
 	/*
 	 * T LPX: Transmitted length of any Low-Power state period
@@ -694,7 +730,7 @@
 	 *	burst
 	 */
 	reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
-	DSI_WRITE(dsi, DSIM_PHYTIMING_REG, reg);
+	exynos_dsi_write(dsi, DSIM_PHYTIMING_REG, reg);
 
 	/*
 	 * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
@@ -714,7 +750,7 @@
 		reg_values[PHYTIMING_CLK_POST] |
 		reg_values[PHYTIMING_CLK_TRAIL];
 
-	DSI_WRITE(dsi, DSIM_PHYTIMING1_REG, reg);
+	exynos_dsi_write(dsi, DSIM_PHYTIMING1_REG, reg);
 
 	/*
 	 * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
@@ -727,29 +763,29 @@
 	 */
 	reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
 		reg_values[PHYTIMING_HS_TRAIL];
-	DSI_WRITE(dsi, DSIM_PHYTIMING2_REG, reg);
+	exynos_dsi_write(dsi, DSIM_PHYTIMING2_REG, reg);
 }
 
 static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
 {
 	u32 reg;
 
-	reg = DSI_READ(dsi, DSIM_CLKCTRL_REG);
+	reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
 	reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
 			| DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
-	DSI_WRITE(dsi, DSIM_CLKCTRL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
 
-	reg = DSI_READ(dsi, DSIM_PLLCTRL_REG);
+	reg = exynos_dsi_read(dsi, DSIM_PLLCTRL_REG);
 	reg &= ~DSIM_PLL_EN;
-	DSI_WRITE(dsi, DSIM_PLLCTRL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
 }
 
 static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane)
 {
-	u32 reg = DSI_READ(dsi, DSIM_CONFIG_REG);
+	u32 reg = exynos_dsi_read(dsi, DSIM_CONFIG_REG);
 	reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
 			DSIM_LANE_EN(lane));
-	DSI_WRITE(dsi, DSIM_CONFIG_REG, reg);
+	exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
 }
 
 static int exynos_dsi_init_link(struct exynos_dsi *dsi)
@@ -760,14 +796,14 @@
 	u32 lanes_mask;
 
 	/* Initialize FIFO pointers */
-	reg = DSI_READ(dsi, DSIM_FIFOCTRL_REG);
+	reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
 	reg &= ~0x1f;
-	DSI_WRITE(dsi, DSIM_FIFOCTRL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
 
 	usleep_range(9000, 11000);
 
 	reg |= 0x1f;
-	DSI_WRITE(dsi, DSIM_FIFOCTRL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
 	usleep_range(9000, 11000);
 
 	/* DSI configuration */
@@ -836,7 +872,7 @@
 			dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
 		reg |= DSIM_CLKLANE_STOP;
 	}
-	DSI_WRITE(dsi, DSIM_CONFIG_REG, reg);
+	exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
 
 	lanes_mask = BIT(dsi->lanes) - 1;
 	exynos_dsi_enable_lane(dsi, lanes_mask);
@@ -849,19 +885,19 @@
 			return -EFAULT;
 		}
 
-		reg = DSI_READ(dsi, DSIM_STATUS_REG);
+		reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
 		if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
 		    != DSIM_STOP_STATE_DAT(lanes_mask))
 			continue;
 	} while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
 
-	reg = DSI_READ(dsi, DSIM_ESCMODE_REG);
+	reg = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
 	reg &= ~DSIM_STOP_STATE_CNT_MASK;
 	reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
-	DSI_WRITE(dsi, DSIM_ESCMODE_REG, reg);
+	exynos_dsi_write(dsi, DSIM_ESCMODE_REG, reg);
 
 	reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
-	DSI_WRITE(dsi, DSIM_TIMEOUT_REG, reg);
+	exynos_dsi_write(dsi, DSIM_TIMEOUT_REG, reg);
 
 	return 0;
 }
@@ -876,20 +912,20 @@
 		reg = DSIM_CMD_ALLOW(0xf)
 			| DSIM_STABLE_VFP(vm->vfront_porch)
 			| DSIM_MAIN_VBP(vm->vback_porch);
-		DSI_WRITE(dsi, DSIM_MVPORCH_REG, reg);
+		exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg);
 
 		reg = DSIM_MAIN_HFP(vm->hfront_porch)
 			| DSIM_MAIN_HBP(vm->hback_porch);
-		DSI_WRITE(dsi, DSIM_MHPORCH_REG, reg);
+		exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg);
 
 		reg = DSIM_MAIN_VSA(vm->vsync_len)
 			| DSIM_MAIN_HSA(vm->hsync_len);
-		DSI_WRITE(dsi, DSIM_MSYNC_REG, reg);
+		exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg);
 	}
 	reg =  DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) |
 		DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol);
 
-	DSI_WRITE(dsi, DSIM_MDRESOL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
 
 	dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
 }
@@ -898,12 +934,12 @@
 {
 	u32 reg;
 
-	reg = DSI_READ(dsi, DSIM_MDRESOL_REG);
+	reg = exynos_dsi_read(dsi, DSIM_MDRESOL_REG);
 	if (enable)
 		reg |= DSIM_MAIN_STAND_BY;
 	else
 		reg &= ~DSIM_MAIN_STAND_BY;
-	DSI_WRITE(dsi, DSIM_MDRESOL_REG, reg);
+	exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
 }
 
 static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
@@ -911,7 +947,7 @@
 	int timeout = 2000;
 
 	do {
-		u32 reg = DSI_READ(dsi, DSIM_FIFOCTRL_REG);
+		u32 reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
 
 		if (!(reg & DSIM_SFR_HEADER_FULL))
 			return 0;
@@ -925,34 +961,35 @@
 
 static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
 {
-	u32 v = DSI_READ(dsi, DSIM_ESCMODE_REG);
+	u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
 
 	if (lpm)
 		v |= DSIM_CMD_LPDT_LP;
 	else
 		v &= ~DSIM_CMD_LPDT_LP;
 
-	DSI_WRITE(dsi, DSIM_ESCMODE_REG, v);
+	exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
 }
 
 static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
 {
-	u32 v = DSI_READ(dsi, DSIM_ESCMODE_REG);
+	u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
 	v |= DSIM_FORCE_BTA;
-	DSI_WRITE(dsi, DSIM_ESCMODE_REG, v);
+	exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
 }
 
 static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
 					struct exynos_dsi_transfer *xfer)
 {
 	struct device *dev = dsi->dev;
-	const u8 *payload = xfer->tx_payload + xfer->tx_done;
-	u16 length = xfer->tx_len - xfer->tx_done;
+	struct mipi_dsi_packet *pkt = &xfer->packet;
+	const u8 *payload = pkt->payload + xfer->tx_done;
+	u16 length = pkt->payload_length - xfer->tx_done;
 	bool first = !xfer->tx_done;
 	u32 reg;
 
 	dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
-		xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+		xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
 
 	if (length > DSI_TX_FIFO_SIZE)
 		length = DSI_TX_FIFO_SIZE;
@@ -961,9 +998,8 @@
 
 	/* Send payload */
 	while (length >= 4) {
-		reg = (payload[3] << 24) | (payload[2] << 16)
-					| (payload[1] << 8) | payload[0];
-		DSI_WRITE(dsi, DSIM_PAYLOAD_REG, reg);
+		reg = get_unaligned_le32(payload);
+		exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
 		payload += 4;
 		length -= 4;
 	}
@@ -978,10 +1014,7 @@
 		/* Fall through */
 	case 1:
 		reg |= payload[0];
-		DSI_WRITE(dsi, DSIM_PAYLOAD_REG, reg);
-		break;
-	case 0:
-		/* Do nothing */
+		exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
 		break;
 	}
 
@@ -989,7 +1022,7 @@
 	if (!first)
 		return;
 
-	reg = (xfer->data[1] << 16) | (xfer->data[0] << 8) | xfer->data_id;
+	reg = get_unaligned_le32(pkt->header);
 	if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
 		dev_err(dev, "waiting for header FIFO timed out\n");
 		return;
@@ -1001,7 +1034,7 @@
 		dsi->state ^= DSIM_STATE_CMD_LPM;
 	}
 
-	DSI_WRITE(dsi, DSIM_PKTHDR_REG, reg);
+	exynos_dsi_write(dsi, DSIM_PKTHDR_REG, reg);
 
 	if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
 		exynos_dsi_force_bta(dsi);
@@ -1017,7 +1050,7 @@
 	u32 reg;
 
 	if (first) {
-		reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
+		reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
 
 		switch (reg & 0x3f) {
 		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
@@ -1056,7 +1089,7 @@
 
 	/* Receive payload */
 	while (length >= 4) {
-		reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
+		reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
 		payload[0] = (reg >>  0) & 0xff;
 		payload[1] = (reg >>  8) & 0xff;
 		payload[2] = (reg >> 16) & 0xff;
@@ -1066,7 +1099,7 @@
 	}
 
 	if (length) {
-		reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
+		reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
 		switch (length) {
 		case 3:
 			payload[2] = (reg >> 16) & 0xff;
@@ -1085,7 +1118,7 @@
 clear_fifo:
 	length = DSI_RX_FIFO_SIZE / 4;
 	do {
-		reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
+		reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
 		if (reg == DSI_RX_FIFO_EMPTY)
 			break;
 	} while (--length);
@@ -1110,13 +1143,14 @@
 
 	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
 
-	if (xfer->tx_len && xfer->tx_done == xfer->tx_len)
+	if (xfer->packet.payload_length &&
+	    xfer->tx_done == xfer->packet.payload_length)
 		/* waiting for RX */
 		return;
 
 	exynos_dsi_send_to_fifo(dsi, xfer);
 
-	if (xfer->tx_len || xfer->rx_len)
+	if (xfer->packet.payload_length || xfer->rx_len)
 		return;
 
 	xfer->result = 0;
@@ -1152,10 +1186,11 @@
 	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
 
 	dev_dbg(dsi->dev,
-		"> xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n",
-		xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+		"> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+		xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
+		xfer->rx_done);
 
-	if (xfer->tx_done != xfer->tx_len)
+	if (xfer->tx_done != xfer->packet.payload_length)
 		return true;
 
 	if (xfer->rx_done != xfer->rx_len)
@@ -1226,9 +1261,10 @@
 	wait_for_completion_timeout(&xfer->completed,
 				    msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
 	if (xfer->result == -ETIMEDOUT) {
+		struct mipi_dsi_packet *pkt = &xfer->packet;
 		exynos_dsi_remove_transfer(dsi, xfer);
-		dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 2, xfer->data,
-			xfer->tx_len, xfer->tx_payload);
+		dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
+			(int)pkt->payload_length, pkt->payload);
 		return -ETIMEDOUT;
 	}
 
@@ -1241,20 +1277,20 @@
 	struct exynos_dsi *dsi = dev_id;
 	u32 status;
 
-	status = DSI_READ(dsi, DSIM_INTSRC_REG);
+	status = exynos_dsi_read(dsi, DSIM_INTSRC_REG);
 	if (!status) {
 		static unsigned long int j;
 		if (printk_timed_ratelimit(&j, 500))
 			dev_warn(dsi->dev, "spurious interrupt\n");
 		return IRQ_HANDLED;
 	}
-	DSI_WRITE(dsi, DSIM_INTSRC_REG, status);
+	exynos_dsi_write(dsi, DSIM_INTSRC_REG, status);
 
 	if (status & DSIM_INT_SW_RST_RELEASE) {
 		u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
 			DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_FRAME_DONE |
 			DSIM_INT_RX_ECC_ERR | DSIM_INT_SW_RST_RELEASE);
-		DSI_WRITE(dsi, DSIM_INTMSK_REG, mask);
+		exynos_dsi_write(dsi, DSIM_INTMSK_REG, mask);
 		complete(&dsi->completed);
 		return IRQ_HANDLED;
 	}
@@ -1401,12 +1437,6 @@
 	return 0;
 }
 
-/* distinguish between short and long DSI packet types */
-static bool exynos_dsi_is_short_dsi_type(u8 type)
-{
-	return (type & 0x0f) <= 8;
-}
-
 static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
 				        const struct mipi_dsi_msg *msg)
 {
@@ -1424,25 +1454,9 @@
 		dsi->state |= DSIM_STATE_INITIALIZED;
 	}
 
-	if (msg->tx_len == 0)
-		return -EINVAL;
-
-	xfer.data_id = msg->type | (msg->channel << 6);
-
-	if (exynos_dsi_is_short_dsi_type(msg->type)) {
-		const char *tx_buf = msg->tx_buf;
-
-		if (msg->tx_len > 2)
-			return -EINVAL;
-		xfer.tx_len = 0;
-		xfer.data[0] = tx_buf[0];
-		xfer.data[1] = (msg->tx_len == 2) ? tx_buf[1] : 0;
-	} else {
-		xfer.tx_len = msg->tx_len;
-		xfer.data[0] = msg->tx_len & 0xff;
-		xfer.data[1] = msg->tx_len >> 8;
-		xfer.tx_payload = msg->tx_buf;
-	}
+	ret = mipi_dsi_create_packet(&xfer.packet, msg);
+	if (ret < 0)
+		return ret;
 
 	xfer.rx_len = msg->rx_len;
 	xfer.rx_payload = msg->rx_buf;
@@ -1597,13 +1611,6 @@
 	return 0;
 }
 
-static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void exynos_dsi_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
@@ -1623,7 +1630,6 @@
 }
 
 static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
-	.mode_fixup = exynos_dsi_mode_fixup,
 	.mode_set = exynos_dsi_mode_set,
 	.enable = exynos_dsi_enable,
 	.disable = exynos_dsi_disable,
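
The driver no longer distinguishes short from long packets itself; mipi_dsi_create_packet() in the DSI core validates the message and packs the four header bytes (data ID, two word-count/parameter bytes, and a slot for the ECC) that get_unaligned_le32(pkt->header) later loads into DSIM_PKTHDR_REG. A minimal caller, sketched against the 4.6-era core API:

	struct mipi_dsi_packet packet;
	int ret;

	/* For long packets, packet.payload/payload_length alias
	 * msg->tx_buf; short packets carry both bytes in the header. */
	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret < 0)
		return ret;
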
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 8baabd8..4ae860c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -50,7 +50,7 @@
 	if (vm_size > exynos_gem->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
+	ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
 			     exynos_gem->dma_addr, exynos_gem->size,
 			     &exynos_gem->dma_attrs);
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8a4f4a0..0525c56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -163,7 +163,6 @@
 	u32		clk_frequency;
 	struct regmap	*sysreg;
 	struct fimc_scaler	sc;
-	struct exynos_drm_ipp_pol	pol;
 	int	id;
 	int	irq;
 	bool	suspended;
@@ -260,32 +259,6 @@
 	fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
 }
 
-static void fimc_set_polarity(struct fimc_context *ctx,
-		struct exynos_drm_ipp_pol *pol)
-{
-	u32 cfg;
-
-	DRM_DEBUG_KMS("inv_pclk[%d]inv_vsync[%d]\n",
-		pol->inv_pclk, pol->inv_vsync);
-	DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n",
-		pol->inv_href, pol->inv_hsync);
-
-	cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
-	cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
-		 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
-
-	if (pol->inv_pclk)
-		cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
-	if (pol->inv_vsync)
-		cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
-	if (pol->inv_href)
-		cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
-	if (pol->inv_hsync)
-		cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
-
-	fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
-}
-
 static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
 {
 	u32 cfg;
@@ -1467,7 +1440,6 @@
 	/* If set to true, JPEG data of the screen can be saved */
 	fimc_handle_jpeg(ctx, false);
 	fimc_set_scaler(ctx, &ctx->sc);
-	fimc_set_polarity(ctx, &ctx->pol);
 
 	switch (cmd) {
 	case IPP_CMD_M2M:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 70194d0..51d484a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -94,12 +94,14 @@
 	unsigned int lcdblk_offset;
 	unsigned int lcdblk_vt_shift;
 	unsigned int lcdblk_bypass_shift;
+	unsigned int lcdblk_mic_bypass_shift;
 
 	unsigned int has_shadowcon:1;
 	unsigned int has_clksel:1;
 	unsigned int has_limited_fmt:1;
 	unsigned int has_vidoutcon:1;
 	unsigned int has_vtsel:1;
+	unsigned int has_mic_bypass:1;
 };
 
 static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -145,6 +147,18 @@
 	.has_vtsel = 1,
 };
 
+static struct fimd_driver_data exynos5420_fimd_driver_data = {
+	.timing_base = 0x20000,
+	.lcdblk_offset = 0x214,
+	.lcdblk_vt_shift = 24,
+	.lcdblk_bypass_shift = 15,
+	.lcdblk_mic_bypass_shift = 11,
+	.has_shadowcon = 1,
+	.has_vidoutcon = 1,
+	.has_vtsel = 1,
+	.has_mic_bypass = 1,
+};
+
 struct fimd_context {
 	struct device			*dev;
 	struct drm_device		*drm_dev;
@@ -168,7 +182,6 @@
 	atomic_t			win_updated;
 	atomic_t			triggering;
 
-	struct exynos_drm_panel_info panel;
 	struct fimd_driver_data *driver_data;
 	struct drm_encoder *encoder;
 };
@@ -184,6 +197,8 @@
 	  .data = &exynos4415_fimd_driver_data },
 	{ .compatible = "samsung,exynos5250-fimd",
 	  .data = &exynos5_fimd_driver_data },
+	{ .compatible = "samsung,exynos5420-fimd",
+	  .data = &exynos5420_fimd_driver_data },
 	{},
 };
 MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
@@ -380,7 +395,7 @@
 	}
 
 	/* Find the clock divider value that gets us closest to ideal_clk */
-	clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
+	clkdiv = DIV_ROUND_CLOSEST(clk_get_rate(ctx->lcd_clk), ideal_clk);
 
 	return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
@@ -461,6 +476,18 @@
 		return;
 	}
 
+	/* TODO: When MIC is enabled in the display path, the
+	 * lcdblk_mic_bypass bit should be cleared.
+	 */
+	if (driver_data->has_mic_bypass && ctx->sysreg &&
+	    regmap_update_bits(ctx->sysreg,
+				driver_data->lcdblk_offset,
+				0x1 << driver_data->lcdblk_mic_bypass_shift,
+				0x1 << driver_data->lcdblk_mic_bypass_shift)) {
+		DRM_ERROR("Failed to update sysreg for bypass mic.\n");
+		return;
+	}
+
 	/* setup horizontal and vertical display size. */
 	val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
 	       VIDTCON2_HOZVAL(mode->hdisplay - 1) |
@@ -861,7 +888,8 @@
 	 * clock. On these SoCs the bootloader may enable it but any
 	 * power domain off/on will reset it to disable state.
 	 */
-	if (ctx->driver_data != &exynos5_fimd_driver_data)
+	if (ctx->driver_data != &exynos5_fimd_driver_data &&
+	    ctx->driver_data != &exynos5420_fimd_driver_data)
 		return;
 
 	val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
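
The divider rounding change is easiest to see with concrete numbers (illustrative only): DIV_ROUND_UP always errs toward a divider that is too large, i.e. a pixel clock that is too slow, while DIV_ROUND_CLOSEST picks whichever divider lands nearer the ideal rate.

	/* 800 MHz source clock, 71 MHz ideal pixel clock (hypothetical): */
	DIV_ROUND_UP(800000000, 71000000);      /* 12 -> 66.7 MHz, 4.3 MHz low  */
	DIV_ROUND_CLOSEST(800000000, 71000000); /* 11 -> 72.7 MHz, 1.7 MHz high */
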
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 8dfe6e1..193d360 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -259,7 +259,7 @@
 	init_dma_attrs(&g2d->cmdlist_dma_attrs);
 	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
 
-	g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+	g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
 						G2D_CMDLIST_POOL_SIZE,
 						&g2d->cmdlist_pool, GFP_KERNEL,
 						&g2d->cmdlist_dma_attrs);
@@ -293,7 +293,7 @@
 	return 0;
 
 err:
-	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+	dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
 			g2d->cmdlist_pool_virt,
 			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 	return ret;
@@ -306,7 +306,8 @@
 	kfree(g2d->cmdlist_node);
 
 	if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
-		dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+		dma_free_attrs(to_dma_dev(subdrv->drm_dev),
+				G2D_CMDLIST_POOL_SIZE,
 				g2d->cmdlist_pool_virt,
 				g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 	}
@@ -880,7 +881,6 @@
 	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
 	struct drm_exynos_pending_g2d_event *e;
 	struct timeval now;
-	unsigned long flags;
 
 	if (list_empty(&runqueue_node->event_list))
 		return;
@@ -893,10 +893,7 @@
 	e->event.tv_usec = now.tv_usec;
 	e->event.cmdlist_no = cmdlist_no;
 
-	spin_lock_irqsave(&drm_dev->event_lock, flags);
-	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-	wake_up_interruptible(&e->base.file_priv->event_wait);
-	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+	drm_send_event(drm_dev, &e->base);
 }
 
 static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
@@ -1072,7 +1069,6 @@
 	struct drm_exynos_pending_g2d_event *e;
 	struct g2d_cmdlist_node *node;
 	struct g2d_cmdlist *cmdlist;
-	unsigned long flags;
 	int size;
 	int ret;
 
@@ -1094,21 +1090,8 @@
 	node->event = NULL;
 
 	if (req->event_type != G2D_EVENT_NOT) {
-		spin_lock_irqsave(&drm_dev->event_lock, flags);
-		if (file->event_space < sizeof(e->event)) {
-			spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-			ret = -ENOMEM;
-			goto err;
-		}
-		file->event_space -= sizeof(e->event);
-		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-
 		e = kzalloc(sizeof(*node->event), GFP_KERNEL);
 		if (!e) {
-			spin_lock_irqsave(&drm_dev->event_lock, flags);
-			file->event_space += sizeof(e->event);
-			spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-
 			ret = -ENOMEM;
 			goto err;
 		}
@@ -1116,9 +1099,12 @@
 		e->event.base.type = DRM_EXYNOS_G2D_EVENT;
 		e->event.base.length = sizeof(e->event);
 		e->event.user_data = req->user_data;
-		e->base.event = &e->event.base;
-		e->base.file_priv = file;
-		e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+		ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
+		if (ret) {
+			kfree(e);
+			goto err;
+		}
 
 		node->event = e;
 	}
@@ -1220,12 +1206,8 @@
 err_unmap:
 	g2d_unmap_cmdlist_gem(g2d, node, file);
 err_free_event:
-	if (node->event) {
-		spin_lock_irqsave(&drm_dev->event_lock, flags);
-		file->event_space += sizeof(e->event);
-		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-		kfree(node->event);
-	}
+	if (node->event)
+		drm_event_cancel_free(drm_dev, &node->event->base);
 err:
 	g2d_put_cmdlist(g2d, node);
 	return ret;
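
The open-coded event_space bookkeeping is replaced by the event helpers added to the DRM core this cycle. The intended lifecycle, in sketch form:

	/* 1. Allocate the driver-private wrapper. */
	e = kzalloc(sizeof(*e), GFP_KERNEL);

	/* 2. Reserve queue space; on failure the caller frees e itself. */
	ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);

	/* 3a. Deliver: queues the event and wakes poll() waiters, ... */
	drm_send_event(drm_dev, &e->base);

	/* 3b. ... or abort: returns the reserved space and frees e. */
	drm_event_cancel_free(drm_dev, &e->base);
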
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 26b5e4b..2914d62 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -65,7 +65,7 @@
 		return -ENOMEM;
 	}
 
-	exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
+	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
 					     &exynos_gem->dma_addr, GFP_KERNEL,
 					     &exynos_gem->dma_attrs);
 	if (!exynos_gem->cookie) {
@@ -73,7 +73,7 @@
 		goto err_free;
 	}
 
-	ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
+	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
 				    exynos_gem->dma_addr, exynos_gem->size,
 				    &exynos_gem->dma_attrs);
 	if (ret < 0) {
@@ -98,7 +98,7 @@
 err_sgt_free:
 	sg_free_table(&sgt);
 err_dma_free:
-	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
+	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
 		       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
 err_free:
 	drm_free_large(exynos_gem->pages);
@@ -118,7 +118,7 @@
 	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
 
-	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
+	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
 			(dma_addr_t)exynos_gem->dma_addr,
 			&exynos_gem->dma_attrs);
 
@@ -280,6 +280,15 @@
 	return 0;
 }
 
+int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_exynos_gem_map *args = data;
+
+	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+					      &args->offset);
+}
+
 dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
 					struct drm_file *filp)
@@ -335,7 +344,7 @@
 	if (vm_size > exynos_gem->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
+	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
 			     exynos_gem->dma_addr, exynos_gem->size,
 			     &exynos_gem->dma_attrs);
 	if (ret < 0) {
@@ -381,7 +390,7 @@
 
 	mutex_lock(&drm_dev->struct_mutex);
 
-	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+	nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
 	if (!nents) {
 		DRM_ERROR("failed to map sgl with dma.\n");
 		mutex_unlock(&drm_dev->struct_mutex);
@@ -396,7 +405,7 @@
 				struct sg_table *sgt,
 				enum dma_data_direction dir)
 {
-	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+	dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
 }
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 9ca5047..0022305 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -71,6 +71,10 @@
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 
+/* get the fake offset of a gem object which can be used with mmap. */
+int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+
 /*
  * get dma address from gem handle and this function could be used for
  * other drivers such as 2d/3d acceleration drivers.
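
The new ioctl presumably also gets an entry in the driver's ioctl table; that table is outside this diff, so the following is an assumption about the wiring rather than part of the patch:

	/* Hypothetical table entry, not shown in this diff. */
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
			  DRM_AUTH | DRM_RENDER_ALLOW),
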
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index d73b9ad..7ca09ee 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -9,7 +9,7 @@
  * option) any later version.
  */
 
-#include <drmP.h>
+#include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 
 #include <linux/dma-mapping.h>
@@ -30,7 +30,6 @@
 {
 	struct dma_iommu_mapping *mapping = NULL;
 	struct exynos_drm_private *priv = drm_dev->dev_private;
-	struct device *dev = drm_dev->dev;
 
 	if (!priv->da_start)
 		priv->da_start = EXYNOS_DEV_ADDR_START;
@@ -43,18 +42,9 @@
 	if (IS_ERR(mapping))
 		return PTR_ERR(mapping);
 
-	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
-					GFP_KERNEL);
-	if (!dev->dma_parms)
-		goto error;
-
-	dma_set_max_seg_size(dev, 0xffffffffu);
-	dev->archdata.mapping = mapping;
+	priv->mapping = mapping;
 
 	return 0;
-error:
-	arm_iommu_release_mapping(mapping);
-	return -ENOMEM;
 }
 
 /*
@@ -67,9 +57,9 @@
  */
 void drm_release_iommu_mapping(struct drm_device *drm_dev)
 {
-	struct device *dev = drm_dev->dev;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
 
-	arm_iommu_release_mapping(dev->archdata.mapping);
+	arm_iommu_release_mapping(priv->mapping);
 }
 
 /*
@@ -84,10 +74,10 @@
 int drm_iommu_attach_device(struct drm_device *drm_dev,
 				struct device *subdrv_dev)
 {
-	struct device *dev = drm_dev->dev;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
 	int ret;
 
-	if (!dev->archdata.mapping)
+	if (!priv->mapping)
 		return 0;
 
 	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
@@ -101,23 +91,12 @@
 	if (subdrv_dev->archdata.mapping)
 		arm_iommu_detach_device(subdrv_dev);
 
-	ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+	ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
 	if (ret < 0) {
 		DRM_DEBUG_KMS("failed iommu attach.\n");
 		return ret;
 	}
 
-	/*
-	 * Set dma_ops to drm_device just one time.
-	 *
-	 * The dma mapping api needs device object and the api is used
-	 * to allocate physial memory and map it with iommu table.
-	 * If iommu attach succeeded, the sub driver would have dma_ops
-	 * for iommu and also all sub drivers have same dma_ops.
-	 */
-	if (get_dma_ops(dev) == get_dma_ops(NULL))
-		set_dma_ops(dev, get_dma_ops(subdrv_dev));
-
 	return 0;
 }
 
@@ -133,8 +112,8 @@
 void drm_iommu_detach_device(struct drm_device *drm_dev,
 				struct device *subdrv_dev)
 {
-	struct device *dev = drm_dev->dev;
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+	struct dma_iommu_mapping *mapping = priv->mapping;
 
 	if (!mapping || !mapping->domain)
 		return;
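
With the mapping moved from dev->archdata into the driver-private struct, the ARM dma-iommu flow reads as follows (sketch, assuming this kernel's arm_iommu_* signatures):

	/* One IO address space for the whole DRM device ... */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   priv->da_start,
					   priv->da_space_size);
	priv->mapping = mapping;

	/* ... each component device attaches to it ... */
	ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);

	/* ... and unbind tears it down again. */
	arm_iommu_detach_device(subdrv_dev);
	arm_iommu_release_mapping(priv->mapping);
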
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index dc1b544..5ffebe0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -29,9 +29,9 @@
 
 static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 {
-	struct device *dev = drm_dev->dev;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
 
-	return dev->archdata.mapping ? true : false;
+	return priv->mapping ? true : false;
 }
 
 #else
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 95eeb91..9c84ee7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -618,27 +618,18 @@
 	mutex_unlock(&c_node->mem_lock);
 }
 
-static void ipp_free_event(struct drm_pending_event *event)
-{
-	kfree(event);
-}
-
 static int ipp_get_event(struct drm_device *drm_dev,
 		struct drm_exynos_ipp_cmd_node *c_node,
 		struct drm_exynos_ipp_queue_buf *qbuf)
 {
 	struct drm_exynos_ipp_send_event *e;
-	unsigned long flags;
+	int ret;
 
 	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
 
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
-	if (!e) {
-		spin_lock_irqsave(&drm_dev->event_lock, flags);
-		c_node->filp->event_space += sizeof(e->event);
-		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+	if (!e)
 		return -ENOMEM;
-	}
 
 	/* make event */
 	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
@@ -646,9 +637,13 @@
 	e->event.user_data = qbuf->user_data;
 	e->event.prop_id = qbuf->prop_id;
 	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
-	e->base.event = &e->event.base;
-	e->base.file_priv = c_node->filp;
-	e->base.destroy = ipp_free_event;
+
+	ret = drm_event_reserve_init(drm_dev, c_node->filp, &e->base, &e->event.base);
+	if (ret) {
+		kfree(e);
+		return ret;
+	}
+
 	mutex_lock(&c_node->event_lock);
 	list_add_tail(&e->base.link, &c_node->event_list);
 	mutex_unlock(&c_node->event_lock);
@@ -1412,7 +1407,6 @@
 	struct drm_exynos_ipp_send_event *e;
 	struct list_head *head;
 	struct timeval now;
-	unsigned long flags;
 	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
 	int ret, i;
 
@@ -1525,10 +1519,7 @@
 	for_each_ipp_ops(i)
 		e->event.buf_id[i] = tbuf_id[i];
 
-	spin_lock_irqsave(&drm_dev->event_lock, flags);
-	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-	wake_up_interruptible(&e->base.file_priv->event_wait);
-	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+	drm_send_event(drm_dev, &e->base);
 	mutex_unlock(&c_node->event_lock);
 
 	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index ce59f44..f18fbe4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -20,7 +20,6 @@
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "regs-rotator.h"
-#include "exynos_drm.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_ipp.h"
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index b605bd7..608b0af 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -412,13 +412,6 @@
 	return 0;
 }
 
-static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder,
-				 const struct drm_display_mode *mode,
-				 struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void exynos_vidi_mode_set(struct drm_encoder *encoder,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode)
@@ -434,7 +427,6 @@
 }
 
 static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
-	.mode_fixup = exynos_vidi_mode_fixup,
 	.mode_set = exynos_vidi_mode_set,
 	.enable = exynos_vidi_enable,
 	.disable = exynos_vidi_disable,
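
Dropping the no-op callbacks here and in the DSI encoder relies on the core now treating .mode_fixup as optional; the helper logic is roughly the following (paraphrase, not the exact core source):

	/* A NULL .mode_fixup means "accept the mode unchanged". */
	if (funcs->mode_fixup &&
	    !funcs->mode_fixup(encoder, mode, adjusted_mode))
		return false;	/* mode rejected */
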
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 21a29db..e148d72 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -867,10 +867,8 @@
 {
 	u32 hdr_sum;
 	u8 chksum;
-	u32 mod;
 	u8 ar;
 
-	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
 	if (hdata->dvi_mode) {
 		hdmi_reg_writeb(hdata, HDMI_VSI_CON,
 				HDMI_VSI_CON_DO_NOT_TRANSMIT);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index d8ab8f0..4ed7798 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -42,41 +42,24 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-	int ret;
 
-	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-				 DCU_MODE_DCU_MODE_MASK,
-				 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
-	if (ret)
-		dev_err(fsl_dev->dev, "Disable CRTC failed\n");
-	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-			   DCU_UPDATE_MODE_READREG);
-	if (ret)
-		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+			   DCU_MODE_DCU_MODE_MASK,
+			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+		     DCU_UPDATE_MODE_READREG);
 }
 
 static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-	int ret;
 
-	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-				 DCU_MODE_DCU_MODE_MASK,
-				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
-	if (ret)
-		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
-	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-			   DCU_UPDATE_MODE_READREG);
-	if (ret)
-		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
-}
-
-static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-					const struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
-{
-	return true;
+	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+			   DCU_MODE_DCU_MODE_MASK,
+			   DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+		     DCU_UPDATE_MODE_READREG);
 }
 
 static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -84,9 +67,8 @@
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 	struct drm_display_mode *mode = &crtc->state->mode;
-	unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index;
+	unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index, pol = 0;
 	unsigned long dcuclk;
-	int ret;
 
 	index = drm_crtc_index(crtc);
 	dcuclk = clk_get_rate(fsl_dev->clk);
@@ -100,51 +82,36 @@
 	vfp = mode->vsync_start - mode->vdisplay;
 	vsw = mode->vsync_end - mode->vsync_start;
 
-	ret = regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
-			   DCU_HSYN_PARA_BP(hbp) |
-			   DCU_HSYN_PARA_PW(hsw) |
-			   DCU_HSYN_PARA_FP(hfp));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
-			   DCU_VSYN_PARA_BP(vbp) |
-			   DCU_VSYN_PARA_PW(vsw) |
-			   DCU_VSYN_PARA_FP(vfp));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
-			   DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
-			   DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL,
-			   DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW);
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
-			   DCU_BGND_G(0) | DCU_BGND_B(0));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
-			   DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
-			   DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
-			   DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
-			   DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-			   DCU_UPDATE_MODE_READREG);
-	if (ret)
-		goto set_failed;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		pol |= DCU_SYN_POL_INV_HS_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		pol |= DCU_SYN_POL_INV_VS_LOW;
+
+	regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
+		     DCU_HSYN_PARA_BP(hbp) |
+		     DCU_HSYN_PARA_PW(hsw) |
+		     DCU_HSYN_PARA_FP(hfp));
+	regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
+		     DCU_VSYN_PARA_BP(vbp) |
+		     DCU_VSYN_PARA_PW(vsw) |
+		     DCU_VSYN_PARA_FP(vfp));
+	regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
+		     DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
+		     DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+	regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
+	regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol);
+	regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
+		     DCU_BGND_G(0) | DCU_BGND_B(0));
+	regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
+		     DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
+	regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
+		     DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
+		     DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
+		     DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
+	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+		     DCU_UPDATE_MODE_READREG);
 	return;
-set_failed:
-	dev_err(dev->dev, "set DCU register failed\n");
 }
 
 static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
@@ -153,7 +120,6 @@
 	.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
 	.disable = fsl_dcu_drm_disable_crtc,
 	.enable = fsl_dcu_drm_crtc_enable,
-	.mode_fixup = fsl_dcu_drm_crtc_mode_fixup,
 	.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
 };
 
@@ -174,10 +140,15 @@
 	int ret;
 
 	primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
+	if (!primary)
+		return -ENOMEM;
+
 	ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
 					&fsl_dcu_drm_crtc_funcs, NULL);
-	if (ret < 0)
+	if (ret) {
+		primary->funcs->destroy(primary);
 		return ret;
+	}
 
 	drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
 
@@ -185,26 +156,15 @@
 		reg_num = LS1021A_LAYER_REG_NUM;
 	else
 		reg_num = VF610_LAYER_REG_NUM;
-	for (i = 0; i <= fsl_dev->soc->total_layer; i++) {
-		for (j = 0; j < reg_num; j++) {
-			ret = regmap_write(fsl_dev->regmap,
-					   DCU_CTRLDESCLN(i, j), 0);
-			if (ret)
-				goto init_failed;
-		}
+	for (i = 0; i < fsl_dev->soc->total_layer; i++) {
+		for (j = 1; j <= reg_num; j++)
+			regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
 	}
-	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-				 DCU_MODE_DCU_MODE_MASK,
-				 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
-	if (ret)
-		goto init_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-			   DCU_UPDATE_MODE_READREG);
-	if (ret)
-		goto init_failed;
+	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+			   DCU_MODE_DCU_MODE_MASK,
+			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+		     DCU_UPDATE_MODE_READREG);
 
 	return 0;
-init_failed:
-	dev_err(fsl_dev->dev, "init DCU register failed\n");
-	return ret;
 }
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index fca97d3..e8d9337 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -28,37 +28,36 @@
 #include "fsl_dcu_drm_crtc.h"
 #include "fsl_dcu_drm_drv.h"
 
+static bool fsl_dcu_drm_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	if (reg == DCU_INT_STATUS || reg == DCU_UPDATE_MODE)
+		return true;
+
+	return false;
+}
+
 static const struct regmap_config fsl_dcu_regmap_config = {
 	.reg_bits = 32,
 	.reg_stride = 4,
 	.val_bits = 32,
 	.cache_type = REGCACHE_RBTREE,
+
+	.volatile_reg = fsl_dcu_drm_is_volatile_reg,
 };
 
 static int fsl_dcu_drm_irq_init(struct drm_device *dev)
 {
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-	unsigned int value;
 	int ret;
 
 	ret = drm_irq_install(dev, fsl_dev->irq);
 	if (ret < 0)
 		dev_err(dev->dev, "failed to install IRQ handler\n");
 
-	ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
-	if (ret)
-		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
-	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
-	if (ret)
-		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
-	value &= DCU_INT_MASK_VBLANK;
-	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
-	if (ret)
-		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
-	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-			   DCU_UPDATE_MODE_READREG);
-	if (ret)
-		dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+	regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
+	regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);
+	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+		     DCU_UPDATE_MODE_READREG);
 
 	return ret;
 }
@@ -112,10 +111,6 @@
 	return 0;
 }
 
-static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-}
-
 static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -124,18 +119,17 @@
 	int ret;
 
 	ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status);
-	if (ret)
-		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	if (ret) {
+		dev_err(dev->dev, "read DCU_INT_STATUS failed\n");
+		return IRQ_NONE;
+	}
+
 	if (int_status & DCU_INT_STATUS_VBLANK)
 		drm_handle_vblank(dev, 0);
 
-	ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff);
-	if (ret)
-		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
-	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-			   DCU_UPDATE_MODE_READREG);
-	if (ret)
-		dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+	regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);
+	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+		     DCU_UPDATE_MODE_READREG);
 
 	return IRQ_HANDLED;
 }
@@ -144,15 +138,11 @@
 {
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 	unsigned int value;
-	int ret;
 
-	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
-	if (ret)
-		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
 	value &= ~DCU_INT_MASK_VBLANK;
-	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
-	if (ret)
-		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+	regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+
 	return 0;
 }
 
@@ -161,15 +151,10 @@
 {
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 	unsigned int value;
-	int ret;
 
-	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
-	if (ret)
-		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
 	value |= DCU_INT_MASK_VBLANK;
-	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
-	if (ret)
-		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+	regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
 }
 
 static const struct file_operations fsl_dcu_drm_fops = {
@@ -191,7 +176,6 @@
 				| DRIVER_PRIME | DRIVER_ATOMIC,
 	.load			= fsl_dcu_load,
 	.unload			= fsl_dcu_unload,
-	.preclose		= fsl_dcu_drm_preclose,
 	.irq_handler		= fsl_dcu_drm_irq,
 	.get_vblank_counter	= drm_vblank_no_hw_counter,
 	.enable_vblank		= fsl_dcu_drm_enable_vblank,
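
DCU_INT_MASK is a disable mask, where a set bit blocks the corresponding interrupt, which is why initialization now writes ~0 and the vblank hooks only toggle a single bit. In sketch form:

	/* Init: everything masked (set bit = blocked). */
	regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);

	/* enable_vblank(): clear just the vblank bit. */
	regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
	regmap_write(fsl_dev->regmap, DCU_INT_MASK,
		     value & ~DCU_INT_MASK_VBLANK);

	/* disable_vblank(): set it again. */
	regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
	regmap_write(fsl_dev->regmap, DCU_INT_MASK,
		     value | DCU_INT_MASK_VBLANK);
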
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 579b9e4..6413ac9 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -133,7 +133,9 @@
 #define DCU_LAYER_RLE_EN		BIT(15)
 #define DCU_LAYER_LUOFFS(x)		((x) << 4)
 #define DCU_LAYER_BB_ON			BIT(2)
-#define DCU_LAYER_AB(x)			(x)
+#define DCU_LAYER_AB_NONE		0
+#define DCU_LAYER_AB_CHROMA_KEYING	1
+#define DCU_LAYER_AB_WHOLE_FRAME	2
 
 #define DCU_LAYER_CKMAX_R(x)		((x) << 16)
 #define DCU_LAYER_CKMAX_G(x)		((x) << 8)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index 0ef5959..c564ec6 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -25,6 +25,8 @@
 
 int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
 {
+	int ret;
+
 	drm_mode_config_init(fsl_dev->drm);
 
 	fsl_dev->drm->mode_config.min_width = 0;
@@ -33,11 +35,25 @@
 	fsl_dev->drm->mode_config.max_height = 2047;
 	fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;
 
-	drm_kms_helper_poll_init(fsl_dev->drm);
-	fsl_dcu_drm_crtc_create(fsl_dev);
-	fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
-	fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+	ret = fsl_dcu_drm_crtc_create(fsl_dev);
+	if (ret)
+		return ret;
+
+	ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
+	if (ret)
+		goto fail_encoder;
+
+	ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+	if (ret)
+		goto fail_connector;
+
 	drm_mode_config_reset(fsl_dev->drm);
+	drm_kms_helper_poll_init(fsl_dev->drm);
 
 	return 0;
+fail_connector:
+	fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder);
+fail_encoder:
+	fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc);
+	return ret;
 }
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 4b13cf9..274558b 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -41,11 +41,17 @@
 {
 	struct drm_framebuffer *fb = state->fb;
 
+	if (!state->fb || !state->crtc)
+		return 0;
+
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_RGB565:
 	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_ARGB8888:
-	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_XRGB1555:
 	case DRM_FORMAT_ARGB1555:
 	case DRM_FORMAT_YUV422:
 		return 0;
@@ -59,19 +65,15 @@
 {
 	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
 	unsigned int value;
-	int index, ret;
+	int index;
 
 	index = fsl_dcu_drm_plane_index(plane);
 	if (index < 0)
 		return;
 
-	ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
-	if (ret)
-		dev_err(fsl_dev->dev, "read DCU_INT_MASK failed\n");
+	regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
 	value &= ~DCU_LAYER_EN;
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
-	if (ret)
-		dev_err(fsl_dev->dev, "set DCU register failed\n");
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
 }
 
 static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
@@ -82,8 +84,8 @@
 	struct drm_plane_state *state = plane->state;
 	struct drm_framebuffer *fb = plane->state->fb;
 	struct drm_gem_cma_object *gem;
-	unsigned int alpha, bpp;
-	int index, ret;
+	unsigned int alpha = DCU_LAYER_AB_NONE, bpp;
+	int index;
 
 	if (!fb)
 		return;
@@ -97,96 +99,74 @@
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_RGB565:
 		bpp = FSL_DCU_RGB565;
-		alpha = 0xff;
 		break;
 	case DRM_FORMAT_RGB888:
 		bpp = FSL_DCU_RGB888;
-		alpha = 0xff;
 		break;
 	case DRM_FORMAT_ARGB8888:
+		alpha = DCU_LAYER_AB_WHOLE_FRAME;
+		/* fall-through */
+	case DRM_FORMAT_XRGB8888:
 		bpp = FSL_DCU_ARGB8888;
-		alpha = 0xff;
 		break;
-	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_ARGB4444:
+		alpha = DCU_LAYER_AB_WHOLE_FRAME;
+		/* fall-through */
+	case DRM_FORMAT_XRGB4444:
 		bpp = FSL_DCU_ARGB4444;
-		alpha = 0xff;
 		break;
 	case DRM_FORMAT_ARGB1555:
+		alpha = DCU_LAYER_AB_WHOLE_FRAME;
+		/* fall-through */
+	case DRM_FORMAT_XRGB1555:
 		bpp = FSL_DCU_ARGB1555;
-		alpha = 0xff;
 		break;
 	case DRM_FORMAT_YUV422:
 		bpp = FSL_DCU_YUV422;
-		alpha = 0xff;
 		break;
 	default:
 		return;
 	}
 
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
-			   DCU_LAYER_HEIGHT(state->crtc_h) |
-			   DCU_LAYER_WIDTH(state->crtc_w));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
-			   DCU_LAYER_POSY(state->crtc_y) |
-			   DCU_LAYER_POSX(state->crtc_x));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap,
-			   DCU_CTRLDESCLN(index, 3), gem->paddr);
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
-			   DCU_LAYER_EN |
-			   DCU_LAYER_TRANS(alpha) |
-			   DCU_LAYER_BPP(bpp) |
-			   DCU_LAYER_AB(0));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
-			   DCU_LAYER_CKMAX_R(0xFF) |
-			   DCU_LAYER_CKMAX_G(0xFF) |
-			   DCU_LAYER_CKMAX_B(0xFF));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
-			   DCU_LAYER_CKMIN_R(0) |
-			   DCU_LAYER_CKMIN_G(0) |
-			   DCU_LAYER_CKMIN_B(0));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
-			   DCU_LAYER_FG_FCOLOR(0));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
-			   DCU_LAYER_BG_BCOLOR(0));
-	if (ret)
-		goto set_failed;
-	if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
-		ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
-				   DCU_LAYER_POST_SKIP(0) |
-				   DCU_LAYER_PRE_SKIP(0));
-		if (ret)
-			goto set_failed;
-	}
-	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-				 DCU_MODE_DCU_MODE_MASK,
-				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
-	if (ret)
-		goto set_failed;
-	ret = regmap_write(fsl_dev->regmap,
-			   DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
-	if (ret)
-		goto set_failed;
-	return;
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
+		     DCU_LAYER_HEIGHT(state->crtc_h) |
+		     DCU_LAYER_WIDTH(state->crtc_w));
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
+		     DCU_LAYER_POSY(state->crtc_y) |
+		     DCU_LAYER_POSX(state->crtc_x));
+	regmap_write(fsl_dev->regmap,
+		     DCU_CTRLDESCLN(index, 3), gem->paddr);
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
+		     DCU_LAYER_EN |
+		     DCU_LAYER_TRANS(0xff) |
+		     DCU_LAYER_BPP(bpp) |
+		     alpha);
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
+		     DCU_LAYER_CKMAX_R(0xFF) |
+		     DCU_LAYER_CKMAX_G(0xFF) |
+		     DCU_LAYER_CKMAX_B(0xFF));
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
+		     DCU_LAYER_CKMIN_R(0) |
+		     DCU_LAYER_CKMIN_G(0) |
+		     DCU_LAYER_CKMIN_B(0));
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
+		     DCU_LAYER_FG_FCOLOR(0));
+	regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
+		     DCU_LAYER_BG_BCOLOR(0));
 
-set_failed:
-	dev_err(fsl_dev->dev, "set DCU register failed\n");
+	if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
+		regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
+			     DCU_LAYER_POST_SKIP(0) |
+			     DCU_LAYER_PRE_SKIP(0));
+	}
+	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+			   DCU_MODE_DCU_MODE_MASK,
+			   DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+	regmap_write(fsl_dev->regmap,
+		     DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+
+	return;
 }
 
 static void
@@ -213,6 +193,7 @@
 static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
 {
 	drm_plane_cleanup(plane);
+	kfree(plane);
 }
 
 static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
@@ -227,8 +208,11 @@
 static const u32 fsl_dcu_drm_plane_formats[] = {
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_RGB888,
+	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XRGB4444,
 	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_XRGB1555,
 	DRM_FORMAT_ARGB1555,
 	DRM_FORMAT_YUV422,
 };
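
The plane rework above pairs each alpha-carrying fourcc with an X
variant that programs the same layer depth but leaves blending off: the
ARGB case sets the blend mode and falls through into the XRGB case for
the rest. A condensed standalone sketch of that selection; the FMT_*
and AB_* constants are illustrative stand-ins, not the DRM fourccs or
DCU register values:

#include <stdio.h>

enum { AB_NONE = 0, AB_WHOLE_FRAME = 2 };	/* cf. DCU_LAYER_AB_* */
enum { FMT_XRGB8888, FMT_ARGB8888 };

static void pick(int fourcc, int *alpha, int *bpp)
{
	*alpha = AB_NONE;	/* default: X channel ignored, opaque */
	*bpp = 0;

	switch (fourcc) {
	case FMT_ARGB8888:
		*alpha = AB_WHOLE_FRAME;	/* format carries alpha */
		/* fall-through: same layer depth as the X variant */
	case FMT_XRGB8888:
		*bpp = 32;
		break;
	}
}

int main(void)
{
	int alpha, bpp;

	pick(FMT_ARGB8888, &alpha, &bpp);
	printf("ARGB: alpha=%d bpp=%d\n", alpha, bpp);	/* 2, 32 */
	pick(FMT_XRGB8888, &alpha, &bpp);
	printf("XRGB: alpha=%d bpp=%d\n", alpha, bpp);	/* 0, 32 */
	return 0;
}
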
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index d0717a8..b837e7a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -217,7 +217,6 @@
 
 static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
 	.dpms = cdv_intel_crt_dpms,
-	.mode_fixup = gma_encoder_mode_fixup,
 	.prepare = gma_encoder_prepare,
 	.commit = gma_encoder_commit,
 	.mode_set = cdv_intel_crt_mode_set,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 6126546..17db4b4 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -116,7 +116,7 @@
 	 .p1 = {.min = 1, .max = 10},
 	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
 	 .find_pll = cdv_intel_find_dp_pll,
-	 }	
+	}
 };
 
 #define _wait_for(COND, MS, W) ({ \
@@ -245,7 +245,7 @@
 	/* We don't know what the other fields of these regs are, so
 	 * leave them in place.
 	 */
-	/* 
+	/*
 	 * The BIT 14:13 of 0x8010/0x8030 is used to select the ref clk
 	 * for the pipe A/B. Display spec 1.06 has wrong definition.
 	 * Correct definition is like below:
@@ -256,7 +256,7 @@
 	 *
 	 * if DPLLA sets 01 and DPLLB sets 02, both use clk from DPLLA
 	 *
-	 */  
+	 */
 	ret = cdv_sb_read(dev, ref_sfr, &ref_value);
 	if (ret)
 		return ret;
@@ -646,7 +646,7 @@
 		 * for DP/eDP. When using SSC clock, the ref clk is 100MHz.Otherwise
 		 * it will be 27MHz. From the VBIOS code it seems that the pipe A choose
 		 * 27MHz for DP/eDP while the Pipe B chooses the 100MHz.
-		 */ 
+		 */
 		if (pipe == 0)
 			refclk = 27000;
 		else
@@ -659,7 +659,7 @@
 	}
 
 	drm_mode_debug_printmodeline(adjusted_mode);
-	
+
 	limit = gma_crtc->clock_funcs->limit(crtc, refclk);
 
 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
@@ -721,7 +721,7 @@
 			pipeconf |= PIPE_6BPC;
 	} else
 		pipeconf |= PIPE_8BPC;
-			
+
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -974,7 +974,6 @@
 
 const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
 	.dpms = gma_crtc_dpms,
-	.mode_fixup = gma_crtc_mode_fixup,
 	.mode_set = cdv_intel_crtc_mode_set,
 	.mode_set_base = gma_pipe_set_base,
 	.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index ddf2d77..28f9d90 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -255,7 +255,6 @@
 
 static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
 	.dpms = cdv_hdmi_dpms,
-	.mode_fixup = gma_encoder_mode_fixup,
 	.prepare = gma_encoder_prepare,
 	.mode_set = cdv_hdmi_mode_set,
 	.commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index cb95765..033d894 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -674,29 +674,17 @@
 	.output_poll_changed = psbfb_output_poll_changed,
 };
 
-static int psb_create_backlight_property(struct drm_device *dev)
-{
-	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct drm_property *backlight;
-
-	if (dev_priv->backlight_property)
-		return 0;
-
-	backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
-
-	dev_priv->backlight_property = backlight;
-
-	return 0;
-}
-
 static void psb_setup_outputs(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
 
 	drm_mode_create_scaling_mode_property(dev);
-	psb_create_backlight_property(dev);
 
+	/* It is ok for this to fail - we just don't get backlight control */
+	if (!dev_priv->backlight_property)
+		dev_priv->backlight_property = drm_property_create_range(dev, 0,
+							"backlight", 0, 100);
 	dev_priv->ops->output_init(dev);
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
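
The framebuffer.c hunk above folds a one-use helper into its caller and
keys creation off the cached pointer, making it idempotent and
explicitly tolerant of allocation failure: a missing property just
means no backlight control. A standalone sketch of that create-once,
ok-to-fail shape; struct dev and make_prop() are illustrative stand-ins
for the psb private data and drm_property_create_range():

#include <stdlib.h>

struct prop { int dummy; };
struct dev { struct prop *backlight; };

/* stand-in for the property constructor; may return NULL on OOM */
static struct prop *make_prop(void)
{
	return malloc(sizeof(struct prop));
}

static void setup_outputs(struct dev *d)
{
	/* create once; NULL is tolerated -- the optional feature
	 * simply stays unavailable */
	if (!d->backlight)
		d->backlight = make_prop();
}

int main(void)
{
	struct dev d = { 0 };

	setup_outputs(&d);	/* creates (or fails silently) */
	setup_outputs(&d);	/* no-op once the property exists */
	free(d.backlight);
	return 0;
}
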
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index ff17af4..5bf765d 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -478,20 +478,6 @@
 	return 0;
 }
 
-bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
-			    const struct drm_display_mode *mode,
-			    struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
-bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
-			 const struct drm_display_mode *mode,
-			 struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 void gma_crtc_prepare(struct drm_crtc *crtc)
 {
 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index ed569d8..b2491c6 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -75,9 +75,6 @@
 extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 			       u16 *blue, u32 start, u32 size);
 extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
-extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode);
 extern void gma_crtc_prepare(struct drm_crtc *crtc);
 extern void gma_crtc_commit(struct drm_crtc *crtc);
 extern void gma_crtc_disable(struct drm_crtc *crtc);
@@ -90,9 +87,6 @@
 extern void gma_encoder_prepare(struct drm_encoder *encoder);
 extern void gma_encoder_commit(struct drm_encoder *encoder);
 extern void gma_encoder_destroy(struct drm_encoder *encoder);
-extern bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
-				   const struct drm_display_mode *mode,
-				   struct drm_display_mode *adjusted_mode);
 
 /* Common clock related functions */
 extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 566d330..e7e2218 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -436,7 +436,7 @@
 	return 0;
 
 err:
-	while (--i) {
+	while (i--) {
 		struct intel_gmbus *bus = &dev_priv->gmbus[i];
 		i2c_del_adapter(&bus->adapter);
 	}
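
The gmbus fix above is a classic unwind off-by-one: after registering
adapters 0..i-1 and then failing, "while (--i)" tears down i-1 through
1 and leaks index 0 (and misbehaves if i is 0 on entry), whereas
"while (i--)" visits i-1 down to 0. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	int i = 3;	/* three adapters registered before the failure */

	/* the buggy form "while (--i)" would print only 2 and 1 */
	while (i--)
		printf("tearing down adapter %d\n", i);	/* 2, 1, 0 */
	return 0;
}
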
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index d758f4c..907cb51 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -382,16 +382,6 @@
 	return MODE_OK;
 }
 
-static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-	if (mode == connector->dpms)
-		return;
-
-	/*first, execute dpms*/
-
-	drm_helper_connector_dpms(connector, mode);
-}
-
 static struct drm_encoder *mdfld_dsi_connector_best_encoder(
 				struct drm_connector *connector)
 {
@@ -404,7 +394,7 @@
 
 /*DSI connector funcs*/
 static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
-	.dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
+	.dpms = drm_helper_connector_dpms,
 	.detect = mdfld_dsi_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = mdfld_dsi_connector_set_property,
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index acd3834..92e3f93e 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -1026,10 +1026,8 @@
 
 const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
 	.dpms = mdfld_crtc_dpms,
-	.mode_fixup = gma_crtc_mode_fixup,
 	.mode_set = mdfld_crtc_mode_set,
 	.mode_set_base = mdfld__intel_pipe_set_base,
 	.prepare = gma_crtc_prepare,
 	.commit = gma_crtc_commit,
 };
-
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 1048f0c..da9fd34 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -657,7 +657,6 @@
 
 const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
 	.dpms = oaktrail_crtc_dpms,
-	.mode_fixup = gma_crtc_mode_fixup,
 	.mode_set = oaktrail_crtc_mode_set,
 	.mode_set_base = oaktrail_pipe_set_base,
 	.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2d18499..8b2eb32 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -601,7 +601,6 @@
 
 static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
 	.dpms = oaktrail_hdmi_dpms,
-	.mode_fixup = gma_encoder_mode_fixup,
 	.prepare = gma_encoder_prepare,
 	.mode_set = oaktrail_hdmi_mode_set,
 	.commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 92e7e57..4e1c685 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -442,14 +442,6 @@
 	/* FIXME: do we need to wrap the other side of this */
 }
 
-/*
- * When a client dies:
- *    - Check for and clean up flipped page state
- */
-static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
-{
-}
-
 static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	return drm_get_pci_dev(pdev, ent, &driver);
@@ -495,7 +487,6 @@
 	.load = psb_driver_load,
 	.unload = psb_driver_unload,
 	.lastclose = psb_driver_lastclose,
-	.preclose = psb_driver_preclose,
 	.set_busid = drm_pci_set_busid,
 
 	.num_ioctls = ARRAY_SIZE(psb_ioctls),
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index dcdbc37..398015b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -430,7 +430,6 @@
 
 const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
 	.dpms = gma_crtc_dpms,
-	.mode_fixup = gma_crtc_mode_fixup,
 	.mode_set = psb_intel_crtc_mode_set,
 	.mode_set_base = gma_pipe_set_base,
 	.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 90db5f4..0594c45 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -253,6 +253,8 @@
 	drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
 
 	priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
+	if (!priv->scale_property)
+		return -ENOMEM;
 
 	drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
 				      priv->select_subconnector);
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index c400428..db0b03f 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -252,14 +252,6 @@
 				     priv->saved_slave_state);
 }
 
-static bool
-sil164_encoder_mode_fixup(struct drm_encoder *encoder,
-			  const struct drm_display_mode *mode,
-			  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static int
 sil164_encoder_mode_valid(struct drm_encoder *encoder,
 			  struct drm_display_mode *mode)
@@ -347,7 +339,6 @@
 	.dpms = sil164_encoder_dpms,
 	.save = sil164_encoder_save,
 	.restore = sil164_encoder_restore,
-	.mode_fixup = sil164_encoder_mode_fixup,
 	.mode_valid = sil164_encoder_mode_valid,
 	.mode_set = sil164_encoder_mode_set,
 	.detect = sil164_encoder_detect,
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index f8ee740..f4315bc 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -856,14 +856,6 @@
 	priv->dpms = mode;
 }
 
-static bool
-tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
-			  const struct drm_display_mode *mode,
-			  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static int tda998x_connector_mode_valid(struct drm_connector *connector,
 					struct drm_display_mode *mode)
 {
@@ -1343,7 +1335,6 @@
 
 static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
 	.dpms = tda998x_encoder_dpms,
-	.mode_fixup = tda998x_encoder_mode_fixup,
 	.prepare = tda998x_encoder_prepare,
 	.commit = tda998x_encoder_commit,
 	.mode_set = tda998x_encoder_mode_set,
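
The mode_fixup deletions through gma500, sil164 and tda998x all remove
callbacks that only return true. They are enabled by the core cleanup
in this pull that made the hook optional: the helpers skip a NULL
mode_fixup rather than requiring a stub. A rough sketch of such an
optional-callback call site; the structs here are illustrative, not
the real drm helper types:

#include <stdbool.h>
#include <stdio.h>

struct mode { int clock; };

struct helper_funcs {
	/* optional: NULL means "no fixup needed" */
	bool (*mode_fixup)(const struct mode *m, struct mode *adj);
};

static bool do_fixup(const struct helper_funcs *f,
		     const struct mode *m, struct mode *adj)
{
	*adj = *m;		/* start from the requested mode */
	if (f->mode_fixup)
		return f->mode_fixup(m, adj);
	return true;		/* no hook: accept the mode as-is */
}

int main(void)
{
	struct helper_funcs funcs = { .mode_fixup = NULL };
	struct mode m = { 65000 }, adj;

	printf("ok=%d clock=%d\n", do_fixup(&funcs, &m, &adj), adj.clock);
	return 0;
}
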
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 051eab3..20a5d04 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -2,9 +2,7 @@
 	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
 	depends on DRM
 	depends on X86 && PCI
-	depends on (AGP || AGP=n)
 	select INTEL_GTT
-	select AGP_INTEL if AGP
 	select INTERVAL_TREE
 	# we need shmfs for the swappable backing store, and in particular
 	# the shmem_readpage() which depends upon tmpfs
@@ -47,3 +45,14 @@
 	  option changes the default for that module option.
 
 	  If in doubt, say "N".
+
+config DRM_I915_USERPTR
+	bool "Always enable userptr support"
+	depends on DRM_I915
+	select MMU_NOTIFIER
+	default y
+	help
+	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
+	  selected to enable full userptr support.
+
+	  If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index cf39ed3..a0f1bd7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,9 +117,8 @@
 	u64 size = 0;
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
-		    drm_mm_node_allocated(&vma->node))
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
 	}
 
@@ -155,7 +154,7 @@
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->pin_count > 0)
 			pin_count++;
 	}
@@ -164,14 +163,13 @@
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
-			   i915_is_ggtt(vma->vm) ? "g" : "pp",
+			   vma->is_ggtt ? "g" : "pp",
 			   vma->node.start, vma->node.size);
-		if (i915_is_ggtt(vma->vm))
-			seq_printf(m, ", type: %u)", vma->ggtt_view.type);
-		else
-			seq_puts(m, ")");
+		if (vma->is_ggtt)
+			seq_printf(m, ", type: %u", vma->ggtt_view.type);
+		seq_puts(m, ")");
 	}
 	if (obj->stolen)
 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -230,7 +228,7 @@
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(vma, head, mm_list) {
+	list_for_each_entry(vma, head, vm_link) {
 		seq_printf(m, "   ");
 		describe_obj(m, vma->obj);
 		seq_printf(m, "\n");
@@ -342,13 +340,13 @@
 		stats->shared += obj->base.size;
 
 	if (USES_FULL_PPGTT(obj->base.dev)) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			struct i915_hw_ppgtt *ppgtt;
 
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			if (i915_is_ggtt(vma->vm)) {
+			if (vma->is_ggtt) {
 				stats->global += obj->base.size;
 				continue;
 			}
@@ -454,12 +452,12 @@
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->active_list, mm_list);
+	count_vmas(&vm->active_list, vm_link);
 	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->inactive_list, mm_list);
+	count_vmas(&vm->inactive_list, vm_link);
 	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -1336,7 +1334,8 @@
 	struct intel_engine_cs *ring;
 	u64 acthd[I915_NUM_RINGS];
 	u32 seqno[I915_NUM_RINGS];
-	int i;
+	u32 instdone[I915_NUM_INSTDONE_REG];
+	int i, j;
 
 	if (!i915.enable_hangcheck) {
 		seq_printf(m, "Hangcheck disabled\n");
@@ -1350,6 +1349,8 @@
 		acthd[i] = intel_ring_get_active_head(ring);
 	}
 
+	i915_get_extra_instdone(dev, instdone);
+
 	intel_runtime_pm_put(dev_priv);
 
 	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
@@ -1370,6 +1371,21 @@
 			   (long long)ring->hangcheck.max_acthd);
 		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
 		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+
+		if (ring->id == RCS) {
+			seq_puts(m, "\tinstdone read =");
+
+			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+				seq_printf(m, " 0x%08x", instdone[j]);
+
+			seq_puts(m, "\n\tinstdone accu =");
+
+			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+				seq_printf(m, " 0x%08x",
+					   ring->hangcheck.instdone[j]);
+
+			seq_puts(m, "\n");
+		}
 	}
 
 	return 0;
@@ -1947,11 +1963,8 @@
 
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context == ctx)
-				seq_printf(m, "(default context %s) ",
-					   ring->name);
-		}
+		if (ctx == dev_priv->kernel_context)
+			seq_printf(m, "(kernel context) ");
 
 		if (i915.enable_execlists) {
 			seq_putc(m, '\n');
@@ -1981,12 +1994,13 @@
 }
 
 static void i915_dump_lrc_obj(struct seq_file *m,
-			      struct intel_engine_cs *ring,
-			      struct drm_i915_gem_object *ctx_obj)
+			      struct intel_context *ctx,
+			      struct intel_engine_cs *ring)
 {
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	unsigned long ggtt_offset = 0;
 
 	if (ctx_obj == NULL) {
@@ -1996,7 +2010,7 @@
 	}
 
 	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-		   intel_execlists_ctx_id(ctx_obj));
+		   intel_execlists_ctx_id(ctx, ring));
 
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
@@ -2042,13 +2056,10 @@
 	if (ret)
 		return ret;
 
-	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context != ctx)
-				i915_dump_lrc_obj(m, ring,
-						  ctx->engine[i].state);
-		}
-	}
+	list_for_each_entry(ctx, &dev_priv->context_list, link)
+		if (ctx != dev_priv->kernel_context)
+			for_each_ring(ring, dev_priv, i)
+				i915_dump_lrc_obj(m, ctx, ring);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2097,13 +2108,13 @@
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
 		read_pointer = ring->next_context_status_buffer;
-		write_pointer = status_pointer & 0x07;
+		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 		if (read_pointer > write_pointer)
-			write_pointer += 6;
+			write_pointer += GEN8_CSB_ENTRIES;
 		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
 			   read_pointer, write_pointer);
 
-		for (i = 0; i < 6; i++) {
+		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
 			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
 			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
 
@@ -2120,11 +2131,8 @@
 
 		seq_printf(m, "\t%d requests in queue\n", count);
 		if (head_req) {
-			struct drm_i915_gem_object *ctx_obj;
-
-			ctx_obj = head_req->ctx->engine[ring_id].state;
 			seq_printf(m, "\tHead request id: %u\n",
-				   intel_execlists_ctx_id(ctx_obj));
+				   intel_execlists_ctx_id(head_req->ctx, ring));
 			seq_printf(m, "\tHead request tail: %u\n",
 				   head_req->tail);
 		}
@@ -2458,9 +2466,9 @@
 
 	for_each_ring(ring, dev_priv, i) {
 		seq_printf(m, "\tSubmissions: %llu %s\n",
-				client->submissions[i],
+				client->submissions[ring->guc_id],
 				ring->name);
-		tot += client->submissions[i];
+		tot += client->submissions[ring->guc_id];
 	}
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2497,10 +2505,10 @@
 
 	seq_printf(m, "\nGuC submissions:\n");
 	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
-			ring->name, guc.submissions[i],
-			guc.last_seqno[i], guc.last_seqno[i]);
-		total += guc.submissions[i];
+		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
+			ring->name, guc.submissions[ring->guc_id],
+			guc.last_seqno[ring->guc_id]);
+		total += guc.submissions[ring->guc_id];
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2578,6 +2586,10 @@
 				enabled = true;
 		}
 	}
+
+	seq_printf(m, "Main link in standby mode: %s\n",
+		   yesno(dev_priv->psr.link_standby));
+
 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
 
 	if (!HAS_DDI(dev))
@@ -3216,9 +3228,11 @@
 {
 	int i;
 	int ret;
+	struct intel_engine_cs *ring;
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_workarounds *workarounds = &dev_priv->workarounds;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -3226,15 +3240,18 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
-	for (i = 0; i < dev_priv->workarounds.count; ++i) {
+	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
+	for_each_ring(ring, dev_priv, i)
+		seq_printf(m, "HW whitelist count for %s: %d\n",
+			   ring->name, workarounds->hw_whitelist_count[i]);
+	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
 		bool ok;
 
-		addr = dev_priv->workarounds.reg[i].addr;
-		mask = dev_priv->workarounds.reg[i].mask;
-		value = dev_priv->workarounds.reg[i].value;
+		addr = workarounds->reg[i].addr;
+		mask = workarounds->reg[i].mask;
+		value = workarounds->reg[i].value;
 		read = I915_READ(addr);
 		ok = (value & mask) == (read & mask);
 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d70d96f..1c6d227 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -391,20 +391,13 @@
 	if (ret)
 		goto cleanup_vga_client;
 
-	/* Initialise stolen first so that we may reserve preallocated
-	 * objects for the BIOS to KMS transition.
-	 */
-	ret = i915_gem_init_stolen(dev);
-	if (ret)
-		goto cleanup_vga_switcheroo;
-
 	intel_power_domains_init_hw(dev_priv, false);
 
 	intel_csr_ucode_init(dev_priv);
 
 	ret = intel_irq_install(dev_priv);
 	if (ret)
-		goto cleanup_gem_stolen;
+		goto cleanup_csr;
 
 	intel_setup_gmbus(dev);
 
@@ -458,9 +451,8 @@
 	intel_guc_ucode_fini(dev);
 	drm_irq_uninstall(dev);
 	intel_teardown_gmbus(dev);
-cleanup_gem_stolen:
-	i915_gem_cleanup_stolen(dev);
-cleanup_vga_switcheroo:
+cleanup_csr:
+	intel_csr_ucode_fini(dev_priv);
 	vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -816,7 +808,41 @@
 		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
 			DRM_INFO("Display fused off, disabling\n");
 			info->num_pipes = 0;
+		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+			DRM_INFO("PipeC fused off\n");
+			info->num_pipes -= 1;
 		}
+	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+		u32 dfsm = I915_READ(SKL_DFSM);
+		u8 disabled_mask = 0;
+		bool invalid;
+		int num_bits;
+
+		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+			disabled_mask |= BIT(PIPE_A);
+		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+			disabled_mask |= BIT(PIPE_B);
+		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+			disabled_mask |= BIT(PIPE_C);
+
+		num_bits = hweight8(disabled_mask);
+
+		switch (disabled_mask) {
+		case BIT(PIPE_A):
+		case BIT(PIPE_B):
+		case BIT(PIPE_A) | BIT(PIPE_B):
+		case BIT(PIPE_A) | BIT(PIPE_C):
+			invalid = true;
+			break;
+		default:
+			invalid = false;
+		}
+
+		if (num_bits > info->num_pipes || invalid)
+			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+				  disabled_mask);
+		else
+			info->num_pipes -= num_bits;
 	}
 
 	/* Initialize slice/subslice/EU info */
@@ -855,6 +881,94 @@
 	}
 }
 
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+	/*
+	 * The i915 workqueue is primarily used for batched retirement of
+	 * requests (and thus managing bo) once the task has been completed
+	 * by the GPU. i915_gem_retire_requests() is called directly when we
+	 * need high-priority retirement, such as waiting for an explicit
+	 * bo.
+	 *
+	 * It is also used for periodic low-priority events, such as
+	 * idle-timers and recording error state.
+	 *
+	 * All tasks on the workqueue are expected to acquire the dev mutex
+	 * so there is no point in running more than one instance of the
+	 * workqueue at any time.  Use an ordered one.
+	 */
+	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+	if (dev_priv->wq == NULL)
+		goto out_err;
+
+	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->hotplug.dp_wq == NULL)
+		goto out_free_wq;
+
+	dev_priv->gpu_error.hangcheck_wq =
+		alloc_ordered_workqueue("i915-hangcheck", 0);
+	if (dev_priv->gpu_error.hangcheck_wq == NULL)
+		goto out_free_dp_wq;
+
+	return 0;
+
+out_free_dp_wq:
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
+out_free_wq:
+	destroy_workqueue(dev_priv->wq);
+out_err:
+	DRM_ERROR("Failed to allocate workqueues.\n");
+
+	return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
+	destroy_workqueue(dev_priv->wq);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int mmio_bar;
+	int mmio_size;
+
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	/*
+	 * Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT, which we want to map with ioremap_wc
+	 * instead. Fortunately, the register BAR remains the same size
+	 * for all the earlier generations up to Ironlake.
+	 */
+	if (INTEL_INFO(dev)->gen < 5)
+		mmio_size = 512 * 1024;
+	else
+		mmio_size = 2 * 1024 * 1024;
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+	if (dev_priv->regs == NULL) {
+		DRM_ERROR("failed to map registers\n");
+
+		return -EIO;
+	}
+
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev);
+
+	return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	intel_teardown_mchbar(dev);
+	pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -870,7 +984,7 @@
 {
 	struct drm_i915_private *dev_priv;
 	struct intel_device_info *info, *device_info;
-	int ret = 0, mmio_bar, mmio_size;
+	int ret = 0;
 	uint32_t aperture_size;
 
 	info = (struct intel_device_info *) flags;
@@ -897,6 +1011,10 @@
 	mutex_init(&dev_priv->modeset_restore_lock);
 	mutex_init(&dev_priv->av_mutex);
 
+	ret = i915_workqueues_init(dev_priv);
+	if (ret < 0)
+		goto out_free_priv;
+
 	intel_pm_setup(dev);
 
 	intel_runtime_pm_get(dev_priv);
@@ -915,28 +1033,12 @@
 
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
-		goto free_priv;
+		goto out_runtime_pm_put;
 	}
 
-	mmio_bar = IS_GEN2(dev) ? 1 : 0;
-	/* Before gen4, the registers and the GTT are behind different BARs.
-	 * However, from gen4 onwards, the registers and the GTT are shared
-	 * in the same BAR, so we want to restrict this ioremap from
-	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
-	 * the register BAR remains the same size for all the earlier
-	 * generations up to Ironlake.
-	 */
-	if (info->gen < 5)
-		mmio_size = 512*1024;
-	else
-		mmio_size = 2*1024*1024;
-
-	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
-	if (!dev_priv->regs) {
-		DRM_ERROR("failed to map registers\n");
-		ret = -EIO;
+	ret = i915_mmio_setup(dev);
+	if (ret < 0)
 		goto put_bridge;
-	}
 
 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev);
@@ -945,7 +1047,7 @@
 
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
-		goto out_freecsr;
+		goto out_uncore_fini;
 
 	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
 	 * otherwise the vga fbdev driver falls over. */
@@ -991,49 +1093,13 @@
 	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
 					      aperture_size);
 
-	/* The i915 workqueue is primarily used for batched retirement of
-	 * requests (and thus managing bo) once the task has been completed
-	 * by the GPU. i915_gem_retire_requests() is called directly when we
-	 * need high-priority retirement, such as waiting for an explicit
-	 * bo.
-	 *
-	 * It is also used for periodic low-priority events, such as
-	 * idle-timers and recording error state.
-	 *
-	 * All tasks on the workqueue are expected to acquire the dev mutex
-	 * so there is no point in running more than one instance of the
-	 * workqueue at any time.  Use an ordered one.
-	 */
-	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
-	if (dev_priv->wq == NULL) {
-		DRM_ERROR("Failed to create our workqueue.\n");
-		ret = -ENOMEM;
-		goto out_mtrrfree;
-	}
-
-	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-	if (dev_priv->hotplug.dp_wq == NULL) {
-		DRM_ERROR("Failed to create our dp workqueue.\n");
-		ret = -ENOMEM;
-		goto out_freewq;
-	}
-
-	dev_priv->gpu_error.hangcheck_wq =
-		alloc_ordered_workqueue("i915-hangcheck", 0);
-	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
-		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
-		ret = -ENOMEM;
-		goto out_freedpwq;
-	}
-
 	intel_irq_init(dev_priv);
 	intel_uncore_sanitize(dev);
 
-	/* Try to make sure MCHBAR is enabled before poking at it */
-	intel_setup_mchbar(dev);
 	intel_opregion_setup(dev);
 
-	i915_gem_load(dev);
+	i915_gem_load_init(dev);
+	i915_gem_shrinker_init(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
@@ -1046,8 +1112,10 @@
 	 * be lost or delayed, but we use them anyways to avoid
 	 * stuck interrupts on some machines.
 	 */
-	if (!IS_I945G(dev) && !IS_I945GM(dev))
-		pci_enable_msi(dev->pdev);
+	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+		if (pci_enable_msi(dev->pdev) < 0)
+			DRM_DEBUG_DRIVER("can't enable MSI\n");
+	}
 
 	intel_device_info_runtime_init(dev);
 
@@ -1097,38 +1165,29 @@
 	intel_power_domains_fini(dev_priv);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
-	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-	unregister_shrinker(&dev_priv->mm.shrinker);
+	i915_gem_shrinker_cleanup(dev_priv);
 
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
 	intel_teardown_mchbar(dev);
 	pm_qos_remove_request(&dev_priv->pm_qos);
-	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
-out_freedpwq:
-	destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_freewq:
-	destroy_workqueue(dev_priv->wq);
-out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
 	i915_global_gtt_cleanup(dev);
-out_freecsr:
-	intel_csr_ucode_fini(dev_priv);
+out_uncore_fini:
 	intel_uncore_fini(dev);
-	pci_iounmap(dev->pdev, dev_priv->regs);
+	i915_mmio_cleanup(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
-free_priv:
-	kmem_cache_destroy(dev_priv->requests);
-	kmem_cache_destroy(dev_priv->vmas);
-	kmem_cache_destroy(dev_priv->objects);
-
+	i915_gem_load_cleanup(dev);
+out_runtime_pm_put:
 	intel_runtime_pm_put(dev_priv);
-
+	i915_workqueues_cleanup(dev_priv);
+out_free_priv:
 	kfree(dev_priv);
+
 	return ret;
 }
 
@@ -1153,8 +1212,7 @@
 
 	i915_teardown_sysfs(dev);
 
-	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-	unregister_shrinker(&dev_priv->mm.shrinker);
+	i915_gem_shrinker_cleanup(dev_priv);
 
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1182,6 +1240,8 @@
 	vga_switcheroo_unregister_client(dev->pdev);
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
 
+	intel_csr_ucode_fini(dev_priv);
+
 	/* Free error state after interrupts are fully disabled. */
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	i915_destroy_error_state(dev);
@@ -1200,27 +1260,17 @@
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 	intel_fbc_cleanup_cfb(dev_priv);
-	i915_gem_cleanup_stolen(dev);
 
-	intel_csr_ucode_fini(dev_priv);
-
-	intel_teardown_mchbar(dev);
-
-	destroy_workqueue(dev_priv->hotplug.dp_wq);
-	destroy_workqueue(dev_priv->wq);
-	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
 
 	i915_global_gtt_cleanup(dev);
 
 	intel_uncore_fini(dev);
-	if (dev_priv->regs != NULL)
-		pci_iounmap(dev->pdev, dev_priv->regs);
+	i915_mmio_cleanup(dev);
 
-	kmem_cache_destroy(dev_priv->requests);
-	kmem_cache_destroy(dev_priv->vmas);
-	kmem_cache_destroy(dev_priv->objects);
+	i915_gem_load_cleanup(dev);
 	pci_dev_put(dev_priv->bridge_dev);
+	i915_workqueues_cleanup(dev_priv);
 	kfree(dev_priv);
 
 	return 0;
@@ -1261,8 +1311,6 @@
 	i915_gem_context_close(dev, file);
 	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
-
-	intel_modeset_preclose(dev, file);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
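
Among the i915_dma.c changes above, the gen9 branch in device-info
setup reads the SKL_DFSM fuse register, folds disabled pipes into a
mask, counts them with hweight8() and rejects impossible combinations:
pipes can only be fused off from the top, so losing pipe A while
keeping pipe C is invalid. A standalone sketch of that validation,
with BIT() and PIPE_* redefined locally so it compiles on its own:

#include <stdio.h>

#define BIT(n)	(1u << (n))
enum { PIPE_A, PIPE_B, PIPE_C };

static int mask_valid(unsigned int disabled)
{
	switch (disabled) {
	case 0:
	case BIT(PIPE_C):
	case BIT(PIPE_B) | BIT(PIPE_C):
	case BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C):
		return 1;	/* disabled pipes form a suffix */
	default:
		return 0;
	}
}

int main(void)
{
	printf("C only: %d\n", mask_valid(BIT(PIPE_C)));		/* 1 */
	printf("A only: %d\n", mask_valid(BIT(PIPE_A)));		/* 0 */
	printf("A+C:    %d\n", mask_valid(BIT(PIPE_A) | BIT(PIPE_C)));	/* 0 */
	return 0;
}
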
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f357058..20e8200 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,9 +35,12 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
 #include <drm/drm_crtc_helper.h>
 
 static struct drm_driver driver;
@@ -600,13 +603,7 @@
 
 	intel_suspend_gt_powersave(dev);
 
-	/*
-	 * Disable CRTCs directly since we want to preserve sw state
-	 * for _thaw. Also, power gate the CRTC power wells.
-	 */
-	drm_modeset_lock_all(dev);
 	intel_display_suspend(dev);
-	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_suspend(dev);
 
@@ -761,9 +758,7 @@
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	drm_modeset_lock_all(dev);
 	intel_display_resume(dev);
-	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_resume(dev);
 
@@ -969,6 +964,15 @@
 	if (PCI_FUNC(pdev->devfn))
 		return -ENODEV;
 
+	/*
+	 * apple-gmux is needed on dual GPU MacBook Pro
+	 * to probe the panel if we're the inactive GPU.
+	 */
+	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+	    apple_gmux_present() && pdev != vga_default_device() &&
+	    !vga_switcheroo_handler_flags())
+		return -EPROBE_DEFER;
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -1079,7 +1083,6 @@
 	 */
 	broxton_init_cdclk(dev);
 	broxton_ddi_phy_init(dev);
-	intel_prepare_ddi(dev);
 
 	return 0;
 }
@@ -1338,8 +1341,8 @@
 		return 0;
 
 	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
-			wait_for_on ? "on" : "off",
-			I915_READ(VLV_GTLC_PW_STATUS));
+		      onoff(wait_for_on),
+		      I915_READ(VLV_GTLC_PW_STATUS));
 
 	/*
 	 * RC6 transitioning can be delayed up to 2 msec (see
@@ -1348,7 +1351,7 @@
 	err = wait_for(COND, 3);
 	if (err)
 		DRM_ERROR("timeout waiting for GT wells to go %s\n",
-			  wait_for_on ? "on" : "off");
+			  onoff(wait_for_on));
 
 	return err;
 #undef COND
@@ -1359,7 +1362,7 @@
 	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
 		return;
 
-	DRM_ERROR("GT register access while GT waking disabled\n");
+	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
 	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
 }
 
@@ -1503,6 +1506,10 @@
 
 	enable_rpm_wakeref_asserts(dev_priv);
 	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+
+	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+		DRM_ERROR("Unclaimed access detected prior to suspending\n");
+
 	dev_priv->pm.suspended = true;
 
 	/*
@@ -1551,6 +1558,8 @@
 
 	intel_opregion_notify_adapter(dev, PCI_D0);
 	dev_priv->pm.suspended = false;
+	if (intel_uncore_unclaimed_mmio(dev_priv))
+		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
 	intel_guc_resume(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b0847b9..1048093 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,6 +34,7 @@
 #include <uapi/drm/drm_fourcc.h>
 
 #include <drm/drmP.h>
+#include "i915_params.h"
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
@@ -58,7 +59,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20151218"
+#define DRIVER_DATE		"20160229"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -69,11 +70,11 @@
 		BUILD_BUG_ON(__i915_warn_cond); \
 	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
 #else
-#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 #endif
 
 #undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
 
 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
 			     (long) (x), __func__);
@@ -87,31 +88,25 @@
  */
 #define I915_STATE_WARN(condition, format...) ({			\
 	int __ret_warn_on = !!(condition);				\
-	if (unlikely(__ret_warn_on)) {					\
-		if (i915.verbose_state_checks)				\
-			WARN(1, format);				\
-		else 							\
+	if (unlikely(__ret_warn_on))					\
+		if (!WARN(i915.verbose_state_checks, format))		\
 			DRM_ERROR(format);				\
-	}								\
 	unlikely(__ret_warn_on);					\
 })
 
-#define I915_STATE_WARN_ON(condition) ({				\
-	int __ret_warn_on = !!(condition);				\
-	if (unlikely(__ret_warn_on)) {					\
-		if (i915.verbose_state_checks)				\
-			WARN(1, "WARN_ON(" #condition ")\n");		\
-		else 							\
-			DRM_ERROR("WARN_ON(" #condition ")\n");		\
-	}								\
-	unlikely(__ret_warn_on);					\
-})
+#define I915_STATE_WARN_ON(x)						\
+	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
 static inline const char *yesno(bool v)
 {
 	return v ? "yes" : "no";
 }
 
+static inline const char *onoff(bool v)
+{
+	return v ? "on" : "off";
+}
+
 enum pipe {
 	INVALID_PIPE = -1,
 	PIPE_A = 0,
@@ -266,6 +261,9 @@
 
 #define for_each_pipe(__dev_priv, __p) \
 	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+		for_each_if ((__mask) & (1 << (__p)))
 #define for_each_plane(__dev_priv, __pipe, __p)				\
 	for ((__p) = 0;							\
 	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\
@@ -339,7 +337,7 @@
 		unsigned boosts;
 	} rps;
 
-	struct intel_engine_cs *bsd_ring;
+	unsigned int bsd_ring;
 };
 
 enum intel_dpll_id {
@@ -633,6 +631,7 @@
 			  struct dpll *best_clock);
 	int (*compute_pipe_wm)(struct intel_crtc *crtc,
 			       struct drm_atomic_state *state);
+	void (*program_watermarks)(struct intel_crtc_state *cstate);
 	void (*update_wm)(struct drm_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -657,9 +656,6 @@
 			  struct drm_i915_gem_object *obj,
 			  struct drm_i915_gem_request *req,
 			  uint32_t flags);
-	void (*update_primary_plane)(struct drm_crtc *crtc,
-				     struct drm_framebuffer *fb,
-				     int x, int y);
 	void (*hpd_irq_setup)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
@@ -726,6 +722,8 @@
 		i915_reg_t reg_post;
 		u32 val_reset;
 	} fw_domain[FW_DOMAIN_ID_COUNT];
+
+	int unclaimed_mmio_check;
 };
 
 /* Iterate over initialised fw domains */
@@ -890,6 +888,9 @@
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
 		int pin_count;
+		struct i915_vma *lrc_vma;
+		u64 lrc_desc;
+		uint32_t *lrc_reg_state;
 	} engine[I915_NUM_RINGS];
 
 	struct list_head link;
@@ -903,16 +904,15 @@
 	ORIGIN_DIRTYFB,
 };
 
-struct i915_fbc {
+struct intel_fbc {
 	/* This is always the inner lock when overlapping with struct_mutex and
 	 * it's the outer lock when overlapping with stolen_lock. */
 	struct mutex lock;
 	unsigned threshold;
-	unsigned int fb_id;
 	unsigned int possible_framebuffer_bits;
 	unsigned int busy_bits;
+	unsigned int visible_pipes_mask;
 	struct intel_crtc *crtc;
-	int y;
 
 	struct drm_mm_node compressed_fb;
 	struct drm_mm_node *compressed_llb;
@@ -922,18 +922,52 @@
 	bool enabled;
 	bool active;
 
+	struct intel_fbc_state_cache {
+		struct {
+			unsigned int mode_flags;
+			uint32_t hsw_bdw_pixel_rate;
+		} crtc;
+
+		struct {
+			unsigned int rotation;
+			int src_w;
+			int src_h;
+			bool visible;
+		} plane;
+
+		struct {
+			u64 ilk_ggtt_offset;
+			uint32_t pixel_format;
+			unsigned int stride;
+			int fence_reg;
+			unsigned int tiling_mode;
+		} fb;
+	} state_cache;
+
+	struct intel_fbc_reg_params {
+		struct {
+			enum pipe pipe;
+			enum plane plane;
+			unsigned int fence_y_offset;
+		} crtc;
+
+		struct {
+			u64 ggtt_offset;
+			uint32_t pixel_format;
+			unsigned int stride;
+			int fence_reg;
+		} fb;
+
+		int cfb_size;
+	} params;
+
 	struct intel_fbc_work {
 		bool scheduled;
+		u32 scheduled_vblank;
 		struct work_struct work;
-		struct drm_framebuffer *fb;
-		unsigned long enable_jiffies;
 	} work;
 
 	const char *no_fbc_reason;
-
-	bool (*is_active)(struct drm_i915_private *dev_priv);
-	void (*activate)(struct intel_crtc *crtc);
-	void (*deactivate)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -973,6 +1007,7 @@
 	unsigned busy_frontbuffer_bits;
 	bool psr2_support;
 	bool aux_frame_sync;
+	bool link_standby;
 };
 
 enum intel_pch {
@@ -1302,7 +1337,7 @@
 	bool busy;
 
 	/* the indicator for dispatch video commands on two BSD rings */
-	int bsd_ring_dispatch_index;
+	unsigned int bsd_ring_dispatch_index;
 
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
@@ -1488,7 +1523,7 @@
 		u8 seq_version;
 		u32 size;
 		u8 *data;
-		u8 *sequence[MIPI_SEQ_MAX];
+		const u8 *sequence[MIPI_SEQ_MAX];
 	} dsi;
 
 	int crt_ddc_pin;
@@ -1660,11 +1695,18 @@
 	u32 mask;
 };
 
-#define I915_MAX_WA_REGS 16
+/*
+ * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
+ * allowing it for RCS as we don't foresee any requirement of having
+ * a whitelist for other engines. When it is really required for
+ * other engines, the limit will need to be increased.
+ */
+#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
 
 struct i915_workarounds {
 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
 	u32 count;
+	u32 hw_whitelist_count[I915_NUM_RINGS];
 };
 
 struct i915_virtual_gpu {
@@ -1761,7 +1803,7 @@
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
 	struct i915_hotplug hotplug;
-	struct i915_fbc fbc;
+	struct intel_fbc fbc;
 	struct i915_drrs drrs;
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
@@ -1785,7 +1827,7 @@
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 	unsigned int skl_boot_cdclk;
-	unsigned int cdclk_freq, max_cdclk_freq;
+	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
 	unsigned int max_dotclk_freq;
 	unsigned int hpll_freq;
 	unsigned int czclk_freq;
@@ -1810,6 +1852,7 @@
 
 	enum modeset_restore modeset_restore;
 	struct mutex modeset_restore_lock;
+	struct drm_atomic_state *modeset_restore_state;
 
 	struct list_head vm_list; /* Global list of all address spaces */
 	struct i915_gtt gtt; /* VM representing the global address space */
@@ -1830,8 +1873,13 @@
 	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
 #endif
 
+	/* dpll and cdclk state is protected by connection_mutex */
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+
+	unsigned int active_crtcs;
+	unsigned int min_pixclk[I915_MAX_PIPES];
+
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	struct i915_workarounds workarounds;
@@ -1946,6 +1994,8 @@
 		void (*stop_ring)(struct intel_engine_cs *ring);
 	} gt;
 
+	struct intel_context *kernel_context;
+
 	bool edp_low_vswing;
 
 	/* perform PHY state sanity checks? */
@@ -2270,9 +2320,9 @@
 
 };
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -2581,6 +2631,11 @@
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
+
+/* WaRsDisableCoarsePowerGating:skl,bxt */
+#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
+						 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
+						  IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2670,44 +2725,7 @@
 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
-/* i915_params.c */
-struct i915_params {
-	int modeset;
-	int panel_ignore_lid;
-	int semaphores;
-	int lvds_channel_mode;
-	int panel_use_ssc;
-	int vbt_sdvo_panel_type;
-	int enable_rc6;
-	int enable_dc;
-	int enable_fbc;
-	int enable_ppgtt;
-	int enable_execlists;
-	int enable_psr;
-	unsigned int preliminary_hw_support;
-	int disable_power_well;
-	int enable_ips;
-	int invert_brightness;
-	int enable_cmd_parser;
-	/* leave bools at the end to not create holes */
-	bool enable_hangcheck;
-	bool fastboot;
-	bool prefault_disable;
-	bool load_detect_test;
-	bool reset;
-	bool disable_display;
-	bool disable_vtd_wa;
-	bool enable_guc_submission;
-	int guc_log_level;
-	int use_mmio_flip;
-	int mmio_debug;
-	bool verbose_state_checks;
-	bool nuclear_pageflip;
-	int edp_vswing;
-};
-extern struct i915_params i915 __read_mostly;
-
-				/* i915_dma.c */
+/* i915_dma.c */
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2750,7 +2768,8 @@
 extern void intel_uncore_early_sanitize(struct drm_device *dev,
 					bool restore_forcewake);
 extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_check_errors(struct drm_device *dev);
+extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
+extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
 extern void intel_uncore_fini(struct drm_device *dev);
 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
@@ -2872,7 +2891,8 @@
 				struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
-void i915_gem_load(struct drm_device *dev);
+void i915_gem_load_init(struct drm_device *dev);
+void i915_gem_load_cleanup(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -3136,18 +3156,11 @@
 /* Some GGTT VM helpers */
 #define i915_obj_to_ggtt(obj) \
 	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-static inline bool i915_is_ggtt(struct i915_address_space *vm)
-{
-	struct i915_address_space *ggtt =
-		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
-	return vm == ggtt;
-}
 
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
 	WARN_ON(i915_is_ggtt(vm));
-
 	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
@@ -3285,6 +3298,7 @@
 #define I915_SHRINK_ACTIVE 0x8
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 
 
 /* i915_gem_tiling.c */
@@ -3455,16 +3469,14 @@
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
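
The header hunks above also change i915_gem_request_alloc() from an
out-parameter plus int return to a __must_check pointer, so callers
(see the i915_gem.c hunk below) test IS_ERR()/PTR_ERR() instead of a
status code. The convention folds small negative errnos into the
pointer value itself; a userspace sketch of the encoding, with
MAX_ERRNO and the helpers redefined locally:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095	/* errnos occupy the top page of the address space */

static void *ERR_PTR(intptr_t err) { return (void *)err; }
static intptr_t PTR_ERR(const void *p) { return (intptr_t)p; }
static int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* stand-in allocator: either a valid object or an encoded errno */
static void *request_alloc(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(16);
}

int main(void)
{
	void *req = request_alloc(1);

	if (IS_ERR(req))
		printf("alloc failed: %ld\n", (long)PTR_ERR(req)); /* -12 */
	else
		free(req);
	return 0;
}
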
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bb44bad..3d31d3a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -138,10 +138,10 @@
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
 		if (vma->pin_count)
 			pinned += vma->node.size;
-	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
 		if (vma->pin_count)
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
@@ -272,7 +272,7 @@
 	int ret;
 
 	drm_gem_object_reference(&obj->base);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
 		if (i915_vma_unbind(vma))
 			break;
 
@@ -489,7 +489,7 @@
 
 	*needs_clflush = 0;
 
-	if (!obj->base.filp)
+	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
 		return -EINVAL;
 
 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -1251,7 +1251,7 @@
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
-	s64 before, now;
+	s64 before = 0; /* Only to silence a compiler warning. */
 	int ret;
 
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1271,14 +1271,17 @@
 			return -ETIME;
 
 		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+
+		/*
+		 * Record current time in case interrupted by signal, or wedged.
+		 */
+		before = ktime_get_raw_ns();
 	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 6)
 		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
-	/* Record current time in case interrupted by signal, or wedged */
 	trace_i915_gem_request_wait_begin(req);
-	before = ktime_get_raw_ns();
 
 	/* Optimistic spin for the next jiffie before touching IRQs */
 	ret = __i915_spin_request(req, state);
@@ -1343,11 +1346,10 @@
 	finish_wait(&ring->irq_queue, &wait);
 
 out:
-	now = ktime_get_raw_ns();
 	trace_i915_gem_request_wait_end(req);
 
 	if (timeout) {
-		s64 tres = *timeout - (now - before);
+		s64 tres = *timeout - (ktime_get_raw_ns() - before);
 
 		*timeout = tres < 0 ? 0 : tres;
 
@@ -2414,7 +2416,7 @@
 	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
 	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
-	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
 static void
@@ -2452,9 +2454,9 @@
 	list_move_tail(&obj->global_list,
 		       &to_i915(obj->base.dev)->mm.bound_list);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (!list_empty(&vma->mm_list))
-			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!list_empty(&vma->vm_link))
+			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	}
 
 	i915_gem_request_assign(&obj->last_fenced_req, NULL);
@@ -2677,10 +2679,8 @@
 		i915_gem_request_remove_from_client(req);
 
 	if (ctx) {
-		if (i915.enable_execlists) {
-			if (ctx != req->ring->default_context)
-				intel_lr_context_unpin(req);
-		}
+		if (i915.enable_execlists && ctx != req->i915->kernel_context)
+			intel_lr_context_unpin(ctx, req->ring);
 
 		i915_gem_context_unreference(ctx);
 	}
@@ -2688,9 +2688,10 @@
 	kmem_cache_free(req->i915->requests, req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out)
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
+			 struct intel_context *ctx,
+			 struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	struct drm_i915_gem_request *req;
@@ -2753,6 +2754,31 @@
 	return ret;
 }
 
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an ERR_PTR()-encoded error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx)
+{
+	struct drm_i915_gem_request *req;
+	int err;
+
+	if (ctx == NULL)
+		ctx = to_i915(engine->dev)->kernel_context;
+	err = __i915_gem_request_alloc(engine, ctx, &req);
+	return err ? ERR_PTR(err) : req;
+}
+
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
 	intel_ring_reserved_space_cancel(req->ringbuf);
@@ -2944,11 +2970,9 @@
 		i915_gem_retire_requests_ring(ring);
 		idle &= list_empty(&ring->request_list);
 		if (i915.enable_execlists) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&ring->execlist_lock, flags);
+			spin_lock_irq(&ring->execlist_lock);
 			idle &= list_empty(&ring->execlist_queue);
-			spin_unlock_irqrestore(&ring->execlist_lock, flags);
+			spin_unlock_irq(&ring->execlist_lock);
 
 			intel_execlists_retire_requests(ring);
 		}
@@ -3170,9 +3194,13 @@
 			return 0;
 
 		if (*to_req == NULL) {
-			ret = i915_gem_request_alloc(to, to->default_context, to_req);
-			if (ret)
-				return ret;
+			struct drm_i915_gem_request *req;
+
+			req = i915_gem_request_alloc(to, NULL);
+			if (IS_ERR(req))
+				return PTR_ERR(req);
+
+			*to_req = req;
 		}
 
 		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3289,7 +3317,7 @@
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (list_empty(&vma->vma_link))
+	if (list_empty(&vma->obj_link))
 		return 0;
 
 	if (!drm_mm_node_allocated(&vma->node)) {
@@ -3308,8 +3336,7 @@
 			return ret;
 	}
 
-	if (i915_is_ggtt(vma->vm) &&
-	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 		i915_gem_object_finish_gtt(obj);
 
 		/* release the fence reg _after_ flushing */
@@ -3323,8 +3350,8 @@
 	vma->vm->unbind_vma(vma);
 	vma->bound = 0;
 
-	list_del_init(&vma->mm_list);
-	if (i915_is_ggtt(vma->vm)) {
+	list_del_init(&vma->vm_link);
+	if (vma->is_ggtt) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 			obj->map_and_fenceable = false;
 		} else if (vma->ggtt_view.pages) {
@@ -3372,9 +3399,9 @@
 		if (!i915.enable_execlists) {
 			struct drm_i915_gem_request *req;
 
-			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-			if (ret)
-				return ret;
+			req = i915_gem_request_alloc(ring, NULL);
+			if (IS_ERR(req))
+				return PTR_ERR(req);
 
 			ret = i915_switch_context(req);
 			if (ret) {
@@ -3581,7 +3608,7 @@
 		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->mm_list, &vm->inactive_list);
+	list_add_tail(&vma->vm_link, &vm->inactive_list);
 
 	return vma;
 
@@ -3746,7 +3773,7 @@
 	/* And bump the LRU for this access */
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-		list_move_tail(&vma->mm_list,
+		list_move_tail(&vma->vm_link,
 			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
 
 	return 0;
@@ -3781,7 +3808,7 @@
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
@@ -3844,7 +3871,7 @@
 			 */
 		}
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
@@ -3854,7 +3881,7 @@
 		}
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		vma->node.color = cache_level;
 	obj->cache_level = cache_level;
 
@@ -4328,10 +4355,20 @@
 	if (ret)
 		goto unref;
 
-	BUILD_BUG_ON(I915_NUM_RINGS > 16);
-	args->busy = obj->active << 16;
-	if (obj->last_write_req)
-		args->busy |= obj->last_write_req->ring->id;
+	args->busy = 0;
+	if (obj->active) {
+		int i;
+
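+		/* The busy word packs readers into the high 16 bits, one
+		 * bit per engine exec_id, and reports the exec_id of the
+		 * last writer in the low 16 bits. */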
+		for (i = 0; i < I915_NUM_RINGS; i++) {
+			struct drm_i915_gem_request *req;
+
+			req = obj->last_read_req[i];
+			if (req)
+				args->busy |= 1 << (16 + req->ring->exec_id);
+		}
+		if (obj->last_write_req)
+			args->busy |= obj->last_write_req->ring->exec_id;
+	}
 
 unref:
 	drm_gem_object_unreference(&obj->base);
@@ -4518,7 +4555,7 @@
 
 	trace_i915_gem_object_destroy(obj);
 
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		int ret;
 
 		vma->pin_count = 0;
@@ -4575,7 +4612,7 @@
 				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
 		    vma->vm == vm)
 			return vma;
@@ -4592,7 +4629,7 @@
 	if (WARN_ONCE(!view, "no view specified"))
 		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
@@ -4601,19 +4638,16 @@
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
-	struct i915_address_space *vm = NULL;
 	WARN_ON(vma->node.allocated);
 
 	/* Keep the vma as a placeholder in the execbuffer reservation lists */
 	if (!list_empty(&vma->exec_list))
 		return;
 
-	vm = vma->vm;
+	if (!vma->is_ggtt)
+		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
-	if (!i915_is_ggtt(vm))
-		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
-
-	list_del(&vma->vma_link);
+	list_del(&vma->obj_link);
 
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
@@ -4833,7 +4867,7 @@
 	 */
 	init_unused_rings(dev);
 
-	BUG_ON(!dev_priv->ring[RCS].default_context);
+	BUG_ON(!dev_priv->kernel_context);
 
 	ret = i915_ppgtt_init_hw(dev);
 	if (ret) {
@@ -4870,10 +4904,9 @@
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
 
-		WARN_ON(!ring->default_context);
-
-		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-		if (ret) {
+		req = i915_gem_request_alloc(ring, NULL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			i915_gem_cleanup_ringbuffer(dev);
 			goto out;
 		}
@@ -4996,7 +5029,7 @@
 }
 
 void
-i915_gem_load(struct drm_device *dev)
+i915_gem_load_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
@@ -5062,11 +5095,18 @@
 
 	dev_priv->mm.interruptible = true;
 
-	i915_gem_shrinker_init(dev_priv);
-
 	mutex_init(&dev_priv->fb_tracking.lock);
 }
 
+void i915_gem_load_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	kmem_cache_destroy(dev_priv->requests);
+	kmem_cache_destroy(dev_priv->vmas);
+	kmem_cache_destroy(dev_priv->objects);
+}
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5113,6 +5153,8 @@
 	spin_lock_init(&file_priv->mm.lock);
 	INIT_LIST_HEAD(&file_priv->mm.request_list);
 
+	file_priv->bsd_ring = -1;
+
 	ret = i915_gem_context_open(dev, file);
 	if (ret)
 		kfree(file_priv);
@@ -5155,8 +5197,8 @@
 
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
+		if (vma->is_ggtt &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm)
@@ -5174,7 +5216,7 @@
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
@@ -5188,8 +5230,8 @@
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
+		if (vma->is_ggtt &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -5205,7 +5247,7 @@
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
@@ -5218,7 +5260,7 @@
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
@@ -5235,8 +5277,8 @@
 
 	BUG_ON(list_empty(&o->vma_list));
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
+		if (vma->is_ggtt &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm)
@@ -5248,7 +5290,7 @@
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->pin_count > 0)
 			return true;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c25083c..5dd84e1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -142,7 +142,7 @@
 		return;
 
 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-				 mm_list) {
+				 vm_link) {
 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
 			break;
 	}
@@ -321,6 +321,18 @@
 	return ERR_PTR(ret);
 }
 
+static void i915_gem_context_unpin(struct intel_context *ctx,
+				   struct intel_engine_cs *engine)
+{
+	if (i915.enable_execlists) {
+		intel_lr_context_unpin(ctx, engine);
+	} else {
+		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
+			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+		i915_gem_context_unreference(ctx);
+	}
+}
+
 void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -329,40 +341,31 @@
 	if (i915.enable_execlists) {
 		struct intel_context *ctx;
 
-		list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		list_for_each_entry(ctx, &dev_priv->context_list, link)
 			intel_lr_context_reset(dev, ctx);
-		}
-
-		return;
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
-		struct intel_context *lctx = ring->last_context;
 
-		if (lctx) {
-			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
-
-			i915_gem_context_unreference(lctx);
+		if (ring->last_context) {
+			i915_gem_context_unpin(ring->last_context, ring);
 			ring->last_context = NULL;
 		}
-
-		/* Force the GPU state to be reinitialised on enabling */
-		if (ring->default_context)
-			ring->default_context->legacy_hw_ctx.initialized = false;
 	}
+
+	/* Force the GPU state to be reinitialised on enabling */
+	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
-	int i;
 
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
-	if (WARN_ON(dev_priv->ring[RCS].default_context))
+	if (WARN_ON(dev_priv->kernel_context))
 		return 0;
 
 	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +395,7 @@
 		return PTR_ERR(ctx);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
-
-		/* NB: RCS will hold a ref for all rings */
-		ring->default_context = ctx;
-	}
+	dev_priv->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			i915.enable_execlists ? "LR" :
@@ -408,7 +406,7 @@
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *dctx = dev_priv->kernel_context;
 	int i;
 
 	if (dctx->legacy_hw_ctx.rcs_state) {
@@ -424,28 +422,21 @@
 		 * to offset the do_switch part, so that i915_gem_context_unreference()
 		 * can then free the base object correctly. */
 		WARN_ON(!dev_priv->ring[RCS].last_context);
-		if (dev_priv->ring[RCS].last_context == dctx) {
-			/* Fake switch to NULL context */
-			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
-			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
-			i915_gem_context_unreference(dctx);
-			dev_priv->ring[RCS].last_context = NULL;
-		}
 
 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = I915_NUM_RINGS; --i >= 0;) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-		if (ring->last_context)
-			i915_gem_context_unreference(ring->last_context);
-
-		ring->default_context = NULL;
-		ring->last_context = NULL;
+		if (ring->last_context) {
+			i915_gem_context_unpin(ring->last_context, ring);
+			ring->last_context = NULL;
+		}
 	}
 
 	i915_gem_context_unreference(dctx);
+	dev_priv->kernel_context = NULL;
 }
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
@@ -864,6 +855,9 @@
 	if (!contexts_enabled(dev))
 		return -ENODEV;
 
+	if (args->pad != 0)
+		return -EINVAL;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -887,6 +881,9 @@
 	struct intel_context *ctx;
 	int ret;
 
+	if (args->pad != 0)
+		return -EINVAL;
+
 	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
 		return -ENOENT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e9c2bfd..1f3eef6 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -193,10 +193,26 @@
 
 static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 {
-	return -EINVAL;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	int ret;
+
+	if (obj->base.size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->base.filp)
+		return -ENODEV;
+
+	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
+	if (ret)
+		return ret;
+
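+	/* Drop the reference on the dma-buf file that the core installed
+	 * in vm_file and pin the shmem backing file instead, so the
+	 * backing store stays alive for the lifetime of the mapping. */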
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->base.filp);
+
+	return 0;
 }
 
-static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
@@ -212,6 +228,27 @@
 	return ret;
 }
 
+static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
+{
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	bool was_interruptible;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	was_interruptible = dev_priv->mm.interruptible;
+	dev_priv->mm.interruptible = false;
+
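+	/* Flush any CPU writes back to memory. The wait is made
+	 * non-interruptible since this hook returns void and so has no
+	 * way to ask the caller to restart on a signal. */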
+	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+
+	dev_priv->mm.interruptible = was_interruptible;
+	mutex_unlock(&dev->struct_mutex);
+
+	if (unlikely(ret))
+		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -224,6 +261,7 @@
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,
 	.begin_cpu_access = i915_gem_begin_cpu_access,
+	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 07c6e4d..ea1f8d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -116,7 +116,7 @@
 
 search_again:
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
 		if (mark_free(vma, &unwind_list))
 			goto found;
 	}
@@ -125,7 +125,7 @@
 		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(vma, &vm->active_list, mm_list) {
+	list_for_each_entry(vma, &vm->active_list, vm_link) {
 		if (mark_free(vma, &unwind_list))
 			goto found;
 	}
@@ -270,7 +270,7 @@
 		WARN_ON(!list_empty(&vm->active_list));
 	}
 
-	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
 		if (vma->pin_count == 0)
 			WARN_ON(i915_vma_unbind(vma));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dccb517..1328bc5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -193,13 +193,10 @@
 		return eb->lut[handle];
 	} else {
 		struct hlist_head *head;
-		struct hlist_node *node;
+		struct i915_vma *vma;
 
 		head = &eb->buckets[handle & eb->and];
-		hlist_for_each(node, head) {
-			struct i915_vma *vma;
-
-			vma = hlist_entry(node, struct i915_vma, exec_node);
+		hlist_for_each_entry(vma, head, exec_node) {
 			if (vma->exec_handle == handle)
 				return vma;
 		}
@@ -671,7 +668,7 @@
 	if (entry->relocation_count == 0)
 		return false;
 
-	if (!i915_is_ggtt(vma->vm))
+	if (!vma->is_ggtt)
 		return false;
 
 	/* See also use_cpu_reloc() */
@@ -690,8 +687,7 @@
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
-	       !i915_is_ggtt(vma->vm));
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
 
 	if (entry->alignment &&
 	    vma->node.start & (entry->alignment - 1))
@@ -1309,6 +1305,9 @@
 	exec_start = params->batch_obj_vm_offset +
 		     params->args_batch_start_offset;
 
+	if (exec_len == 0)
+		exec_len = params->batch_obj->base.size;
+
 	ret = ring->dispatch_execbuffer(params->request,
 					exec_start, exec_len,
 					params->dispatch_flags);
@@ -1325,33 +1324,23 @@
 
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
- * The Ring ID is returned.
+ * The ring index is returned.
  */
-static int gen8_dispatch_bsd_ring(struct drm_device *dev,
-				  struct drm_file *file)
+static unsigned int
+gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	/* Check whether the file_priv is using one ring */
-	if (file_priv->bsd_ring)
-		return file_priv->bsd_ring->id;
-	else {
-		/* If no, use the ping-pong mechanism to select one ring */
-		int ring_id;
-
-		mutex_lock(&dev->struct_mutex);
-		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
-			ring_id = VCS;
-			dev_priv->mm.bsd_ring_dispatch_index = 1;
-		} else {
-			ring_id = VCS2;
-			dev_priv->mm.bsd_ring_dispatch_index = 0;
-		}
-		file_priv->bsd_ring = &dev_priv->ring[ring_id];
-		mutex_unlock(&dev->struct_mutex);
-		return ring_id;
+	/* Check whether the file_priv has already selected one ring. */
+	if ((int)file_priv->bsd_ring < 0) {
+		/* If not, use the ping-pong mechanism to select one. */
+		mutex_lock(&dev_priv->dev->struct_mutex);
+		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
+		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
+		mutex_unlock(&dev_priv->dev->struct_mutex);
 	}
+
+	return file_priv->bsd_ring;
 }
 
 static struct drm_i915_gem_object *
@@ -1374,6 +1363,64 @@
 	return vma->obj;
 }
 
+#define I915_USER_RINGS (4)
+
+static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+	[I915_EXEC_DEFAULT]	= RCS,
+	[I915_EXEC_RENDER]	= RCS,
+	[I915_EXEC_BLT]		= BCS,
+	[I915_EXEC_BSD]		= VCS,
+	[I915_EXEC_VEBOX]	= VECS
+};
+
+static int
+eb_select_ring(struct drm_i915_private *dev_priv,
+	       struct drm_file *file,
+	       struct drm_i915_gem_execbuffer2 *args,
+	       struct intel_engine_cs **ring)
+{
+	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+	if (user_ring_id > I915_USER_RINGS) {
+		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+		return -EINVAL;
+	}
+
+	if ((user_ring_id != I915_EXEC_BSD) &&
+	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+		DRM_DEBUG("execbuf with non bsd ring but with invalid "
+			  "bsd dispatch flags: %d\n", (int)(args->flags));
+		return -EINVAL;
+	}
+
+	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
+
+		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
+			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
+			   bsd_idx <= I915_EXEC_BSD_RING2) {
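+			/* Convert the explicit ring selector into a
+			 * zero-based index into the VCS engines. */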
+			bsd_idx >>= I915_EXEC_BSD_SHIFT;
+			bsd_idx--;
+		} else {
+			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
+				  bsd_idx);
+			return -EINVAL;
+		}
+
+		*ring = &dev_priv->ring[_VCS(bsd_idx)];
+	} else {
+		*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+	}
+
+	if (!intel_ring_initialized(*ring)) {
+		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
@@ -1381,6 +1428,7 @@
 		       struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1411,51 +1459,9 @@
 	if (args->flags & I915_EXEC_IS_PINNED)
 		dispatch_flags |= I915_DISPATCH_PINNED;
 
-	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
-		DRM_DEBUG("execbuf with unknown ring: %d\n",
-			  (int)(args->flags & I915_EXEC_RING_MASK));
-		return -EINVAL;
-	}
-
-	if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
-	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
-		DRM_DEBUG("execbuf with non bsd ring but with invalid "
-			"bsd dispatch flags: %d\n", (int)(args->flags));
-		return -EINVAL;
-	} 
-
-	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
-		ring = &dev_priv->ring[RCS];
-	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
-		if (HAS_BSD2(dev)) {
-			int ring_id;
-
-			switch (args->flags & I915_EXEC_BSD_MASK) {
-			case I915_EXEC_BSD_DEFAULT:
-				ring_id = gen8_dispatch_bsd_ring(dev, file);
-				ring = &dev_priv->ring[ring_id];
-				break;
-			case I915_EXEC_BSD_RING1:
-				ring = &dev_priv->ring[VCS];
-				break;
-			case I915_EXEC_BSD_RING2:
-				ring = &dev_priv->ring[VCS2];
-				break;
-			default:
-				DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
-					  (int)(args->flags & I915_EXEC_BSD_MASK));
-				return -EINVAL;
-			}
-		} else
-			ring = &dev_priv->ring[VCS];
-	} else
-		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
-
-	if (!intel_ring_initialized(ring)) {
-		DRM_DEBUG("execbuf with invalid ring: %d\n",
-			  (int)(args->flags & I915_EXEC_RING_MASK));
-		return -EINVAL;
-	}
+	ret = eb_select_ring(dev_priv, file, args, &ring);
+	if (ret)
+		return ret;
 
 	if (args->buffer_count < 1) {
 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1602,11 +1608,13 @@
 		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	ret = i915_gem_request_alloc(ring, ctx, &params->request);
-	if (ret)
+	req = i915_gem_request_alloc(ring, ctx);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
 		goto err_batch_unpin;
+	}
 
-	ret = i915_gem_request_add_to_client(params->request, file);
+	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
 		goto err_batch_unpin;
 
@@ -1622,6 +1630,7 @@
 	params->dispatch_flags          = dispatch_flags;
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
+	params->request                 = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
@@ -1645,8 +1654,8 @@
 	 * must be freed again. If it was submitted then it is being tracked
 	 * on the active request list and no clean up is required here.
 	 */
-	if (ret && params->request)
-		i915_gem_request_cancel(params->request);
+	if (ret && !IS_ERR_OR_NULL(req))
+		i915_gem_request_cancel(req);
 
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56f4f2e..49e4f26 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,9 +96,11 @@
 static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
-const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_normal = {
+	.type = I915_GGTT_VIEW_NORMAL,
+};
 const struct i915_ggtt_view i915_ggtt_view_rotated = {
-        .type = I915_GGTT_VIEW_ROTATED
+	.type = I915_GGTT_VIEW_ROTATED,
 };
 
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
@@ -2130,6 +2132,25 @@
 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
+static void gtt_write_workarounds(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* This function is for GTT-related workarounds. It is called on
+	 * driver load and after a GPU reset, so you can place workarounds
+	 * here even if they get clobbered by a GPU reset.
+	 */
+	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+	else if (IS_CHERRYVIEW(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+	else if (IS_SKYLAKE(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+	else if (IS_BROXTON(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+}
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2146,6 +2167,8 @@
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
+	gtt_write_workarounds(dev);
+
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
 	 * and the PDPs are contained within the context itself.  We don't
 	 * need to do anything here. */
@@ -2735,7 +2758,7 @@
 		}
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
 	}
 
 	/* Clear any non-preallocated blocks */
@@ -2807,6 +2830,8 @@
 		ppgtt->base.cleanup(&ppgtt->base);
 	}
 
+	i915_gem_cleanup_stolen(dev);
+
 	if (drm_mm_initialized(&vm->mm)) {
 		if (intel_vgpu_active(dev))
 			intel_vgt_deballoon();
@@ -3173,12 +3198,21 @@
 	}
 
 	gtt->base.dev = dev;
+	gtt->base.is_ggtt = true;
 
 	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
 			     &gtt->mappable_base, &gtt->mappable_end);
 	if (ret)
 		return ret;
 
+	/*
+	 * Initialise stolen early so that we may reserve preallocated
+	 * objects for the BIOS to KMS transition.
+	 */
+	ret = i915_gem_init_stolen(dev);
+	if (ret)
+		goto out_gtt_cleanup;
+
 	/* GMADR is the PCI mmio aperture into the global GTT. */
 	DRM_INFO("Memory usable by graphics device = %lluM\n",
 		 gtt->base.total >> 20);
@@ -3198,6 +3232,11 @@
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
 	return 0;
+
+out_gtt_cleanup:
+	gtt->base.cleanup(&dev_priv->gtt.base);
+
+	return ret;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -3220,7 +3259,7 @@
 	vm = &dev_priv->gtt.base;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		flush = false;
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (vma->vm != vm)
 				continue;
 
@@ -3276,19 +3315,20 @@
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->vma_link);
-	INIT_LIST_HEAD(&vma->mm_list);
+	INIT_LIST_HEAD(&vma->vm_link);
+	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
 	vma->obj = obj;
+	vma->is_ggtt = i915_is_ggtt(vm);
 
 	if (i915_is_ggtt(vm))
 		vma->ggtt_view = *ggtt_view;
-
-	list_add_tail(&vma->vma_link, &obj->vma_list);
-	if (!i915_is_ggtt(vm))
+	else
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
 
+	list_add_tail(&vma->obj_link, &obj->vma_list);
+
 	return vma;
 }
 
@@ -3329,8 +3369,9 @@
 }
 
 static struct scatterlist *
-rotate_pages(dma_addr_t *in, unsigned int offset,
+rotate_pages(const dma_addr_t *in, unsigned int offset,
 	     unsigned int width, unsigned int height,
+	     unsigned int stride,
 	     struct sg_table *st, struct scatterlist *sg)
 {
 	unsigned int column, row;
@@ -3342,7 +3383,7 @@
 	}
 
 	for (column = 0; column < width; column++) {
-		src_idx = width * (height - 1) + column;
+		src_idx = stride * (height - 1) + column;
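+		/* Walk each source column bottom-to-top; stride is the row
+		 * pitch in pages and need not equal the visible width. */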
 		for (row = 0; row < height; row++) {
 			st->nents++;
 			/* We don't need the pages, but need to initialize
@@ -3353,7 +3394,7 @@
 			sg_dma_address(sg) = in[offset + src_idx];
 			sg_dma_len(sg) = PAGE_SIZE;
 			sg = sg_next(sg);
-			src_idx -= width;
+			src_idx -= stride;
 		}
 	}
 
@@ -3361,10 +3402,9 @@
 }
 
 static struct sg_table *
-intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 			  struct drm_i915_gem_object *obj)
 {
-	struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
 	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
 	unsigned int size_pages_uv;
 	struct sg_page_iter sg_iter;
@@ -3406,6 +3446,7 @@
 	/* Rotate the pages. */
 	sg = rotate_pages(page_addr_list, 0,
 		     rot_info->width_pages, rot_info->height_pages,
+		     rot_info->width_pages,
 		     st, NULL);
 
 	/* Append the UV plane if NV12. */
@@ -3421,6 +3462,7 @@
 		rotate_pages(page_addr_list, uv_start_page,
 			     rot_info->width_pages_uv,
 			     rot_info->height_pages_uv,
+			     rot_info->width_pages_uv,
 			     st, sg);
 	}
 
@@ -3502,7 +3544,7 @@
 		vma->ggtt_view.pages = vma->obj->pages;
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
 		vma->ggtt_view.pages =
-			intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
 		vma->ggtt_view.pages =
 			intel_partial_pages(&vma->ggtt_view, vma->obj);
@@ -3558,13 +3600,9 @@
 		return 0;
 
 	if (vma->bound == 0 && vma->vm->allocate_va_range) {
-		trace_i915_va_alloc(vma->vm,
-				    vma->node.start,
-				    vma->node.size,
-				    VM_TO_TRACE_NAME(vma->vm));
-
 		/* XXX: i915_vma_pin() will fix this +- hack */
 		vma->pin_count++;
+		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
@@ -3596,7 +3634,7 @@
 	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
 	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return view->params.rotation_info.size;
+		return view->params.rotated.size;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b448ad8..8774f1b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -44,7 +44,6 @@
 
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
-
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
@@ -156,7 +155,7 @@
 			u64 offset;
 			unsigned int size;
 		} partial;
-		struct intel_rotation_info rotation_info;
+		struct intel_rotation_info rotated;
 	} params;
 
 	struct sg_table *pages;
@@ -184,6 +183,7 @@
 #define GLOBAL_BIND	(1<<0)
 #define LOCAL_BIND	(1<<1)
 	unsigned int bound : 4;
+	bool is_ggtt : 1;
 
 	/**
 	 * Support different GGTT views into the same object.
@@ -195,9 +195,9 @@
 	struct i915_ggtt_view ggtt_view;
 
 	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
+	struct list_head vm_link;
 
-	struct list_head vma_link; /* Link in the object's VMA list */
+	struct list_head obj_link; /* Link in the object's VMA list */
 
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
@@ -276,6 +276,8 @@
 	u64 start;		/* Start offset always 0 for dri2 */
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
+	bool is_ggtt;
+
 	struct i915_page_scratch *scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -331,6 +333,8 @@
 			u32 flags);
 };
 
+#define i915_is_ggtt(V) ((V)->is_ggtt)
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
@@ -343,6 +347,8 @@
 
 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
+	size_t stolen_reserved_base;
+	size_t stolen_reserved_size;
 	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
@@ -417,7 +423,7 @@
 static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
 				      uint32_t pde_shift)
 {
-	const uint64_t mask = ~((1 << pde_shift) - 1);
+	const uint64_t mask = ~((1ULL << pde_shift) - 1);
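+	/* 1ULL forces 64-bit arithmetic; gen8 page-directory shifts can
+	 * exceed 31 bits, which would overflow a plain int. */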
 	uint64_t end;
 
 	WARN_ON(length == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f7df54a..d3c473f 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -47,6 +47,46 @@
 #endif
 }
 
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	int count = 0;
+
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (drm_mm_node_allocated(&vma->node))
+			count++;
+		if (vma->pin_count)
+			count++;
+	}
+
+	return count;
+}
+
+static bool swap_available(void)
+{
+	return get_nr_swap_pages() > 0;
+}
+
+static bool can_release_pages(struct drm_i915_gem_object *obj)
+{
+	/* Only report true if by unbinding the object and putting its pages
+	 * we can actually make forward progress towards freeing physical
+	 * pages.
+	 *
+	 * If the pages are pinned for any other reason than being bound
+	 * to the GPU, simply unbinding from the GPU is not going to succeed
+	 * in releasing our pin count on the pages themselves.
+	 */
+	if (obj->pages_pin_count != num_vma_bound(obj))
+		return false;
+
+	/* We can only return physical pages to the system if we can either
+	 * discard the contents (because the user has marked them as being
+	 * purgeable) or if we can move their contents out to swap.
+	 */
+	return swap_available() || obj->madv == I915_MADV_DONTNEED;
+}
+
 /**
  * i915_gem_shrink - Shrink buffer object caches
  * @dev_priv: i915 device
@@ -129,11 +169,14 @@
 			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
 				continue;
 
+			if (!can_release_pages(obj))
+				continue;
+
 			drm_gem_object_reference(&obj->base);
 
 			/* For the unbound phase, this should be a no-op! */
 			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, vma_link)
+						 &obj->vma_list, obj_link)
 				if (i915_vma_unbind(vma))
 					break;
 
@@ -188,21 +231,6 @@
 	return true;
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-	int count = 0;
-
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (drm_mm_node_allocated(&vma->node))
-			count++;
-		if (vma->pin_count)
-			count++;
-	}
-
-	return count;
-}
-
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -222,7 +250,7 @@
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
+		if (!obj->active && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
@@ -339,8 +367,20 @@
 	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-	register_shrinker(&dev_priv->mm.shrinker);
+	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-	register_oom_notifier(&dev_priv->mm.oom_notifier);
+	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+}
+
+/**
+ * i915_gem_shrinker_cleanup - Clean up i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function unregisters the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+	unregister_shrinker(&dev_priv->mm.shrinker);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 3476877..2e6e9fb 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -458,6 +458,9 @@
 		return 0;
 	}
 
+	dev_priv->gtt.stolen_reserved_base = reserved_base;
+	dev_priv->gtt.stolen_reserved_size = reserved_size;
+
 	/* It is possible for the reserved area to end before the end of stolen
 	 * memory, so just consider the start. */
 	reserved_total = stolen_top - reserved_base;
@@ -569,6 +572,9 @@
 	if (obj->pages == NULL)
 		goto cleanup;
 
+	obj->get_page.sg = obj->pages->sgl;
+	obj->get_page.last = 0;
+
 	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;
 
@@ -632,6 +638,8 @@
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return NULL;
 
+	lockdep_assert_held(&dev->struct_mutex);
+
 	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
 			stolen_offset, gtt_offset, size);
 
@@ -689,7 +697,7 @@
 
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt->inactive_list);
 	}
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 90dbf81..6be40f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -49,21 +49,18 @@
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root objects;
-	struct list_head linear;
-	bool has_linear;
 };
 
 struct i915_mmu_object {
 	struct i915_mmu_notifier *mn;
+	struct drm_i915_gem_object *obj;
 	struct interval_tree_node it;
 	struct list_head link;
-	struct drm_i915_gem_object *obj;
 	struct work_struct work;
-	bool active;
-	bool is_linear;
+	bool attached;
 };
 
-static void __cancel_userptr__worker(struct work_struct *work)
+static void cancel_userptr(struct work_struct *work)
 {
 	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
 	struct drm_i915_gem_object *obj = mo->obj;
@@ -81,7 +78,7 @@
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
 			int ret = i915_vma_unbind(vma);
 			WARN_ON(ret && ret != -EIO);
 		}
@@ -94,24 +91,22 @@
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static unsigned long cancel_userptr(struct i915_mmu_object *mo)
+static void add_object(struct i915_mmu_object *mo)
 {
-	unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
+	if (mo->attached)
+		return;
 
-	/* The mmu_object is released late when destroying the
-	 * GEM object so it is entirely possible to gain a
-	 * reference on an object in the process of being freed
-	 * since our serialisation is via the spinlock and not
-	 * the struct_mutex - and consequently use it after it
-	 * is freed and then double free it.
-	 */
-	if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
-		schedule_work(&mo->work);
-		/* only schedule one work packet to avoid the refleak */
-		mo->active = false;
-	}
+	interval_tree_insert(&mo->it, &mo->mn->objects);
+	mo->attached = true;
+}
 
-	return end;
+static void del_object(struct i915_mmu_object *mo)
+{
+	if (!mo->attached)
+		return;
+
+	interval_tree_remove(&mo->it, &mo->mn->objects);
+	mo->attached = false;
 }
 
 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
@@ -122,28 +117,36 @@
 	struct i915_mmu_notifier *mn =
 		container_of(_mn, struct i915_mmu_notifier, mn);
 	struct i915_mmu_object *mo;
+	struct interval_tree_node *it;
+	LIST_HEAD(cancelled);
+
+	if (RB_EMPTY_ROOT(&mn->objects))
+		return;
 
 	/* interval ranges are inclusive, but invalidate range is exclusive */
 	end--;
 
 	spin_lock(&mn->lock);
-	if (mn->has_linear) {
-		list_for_each_entry(mo, &mn->linear, link) {
-			if (mo->it.last < start || mo->it.start > end)
-				continue;
+	it = interval_tree_iter_first(&mn->objects, start, end);
+	while (it) {
+		/* The mmu_object is released late when destroying the
+		 * GEM object so it is entirely possible to gain a
+		 * reference on an object in the process of being freed
+		 * since our serialisation is via the spinlock and not
+		 * the struct_mutex - and consequently use it after it
+		 * is freed and then double free it. To prevent that
+		 * use-after-free we only acquire a reference on the
+		 * object if it is not in the process of being destroyed.
+		 */
+		mo = container_of(it, struct i915_mmu_object, it);
+		if (kref_get_unless_zero(&mo->obj->base.refcount))
+			schedule_work(&mo->work);
 
-			cancel_userptr(mo);
-		}
-	} else {
-		struct interval_tree_node *it;
-
-		it = interval_tree_iter_first(&mn->objects, start, end);
-		while (it) {
-			mo = container_of(it, struct i915_mmu_object, it);
-			start = cancel_userptr(mo);
-			it = interval_tree_iter_next(it, start, end);
-		}
+		list_add(&mo->link, &cancelled);
+		it = interval_tree_iter_next(it, start, end);
 	}
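+	/* Removal from the interval tree is deferred until after the walk
+	 * so that the iterator above remains valid. */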
+	list_for_each_entry(mo, &cancelled, link)
+		del_object(mo);
 	spin_unlock(&mn->lock);
 }
 
@@ -164,8 +167,6 @@
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
 	mn->objects = RB_ROOT;
-	INIT_LIST_HEAD(&mn->linear);
-	mn->has_linear = false;
 
 	 /* Protected by mmap_sem (write-lock) */
 	ret = __mmu_notifier_register(&mn->mn, mm);
@@ -177,85 +178,6 @@
 	return mn;
 }
 
-static int
-i915_mmu_notifier_add(struct drm_device *dev,
-		      struct i915_mmu_notifier *mn,
-		      struct i915_mmu_object *mo)
-{
-	struct interval_tree_node *it;
-	int ret = 0;
-
-	/* By this point we have already done a lot of expensive setup that
-	 * we do not want to repeat just because the caller (e.g. X) has a
-	 * signal pending (and partly because of that expensive setup, X
-	 * using an interrupt timer is likely to get stuck in an EINTR loop).
-	 */
-	mutex_lock(&dev->struct_mutex);
-
-	/* Make sure we drop the final active reference (and thereby
-	 * remove the objects from the interval tree) before we do
-	 * the check for overlapping objects.
-	 */
-	i915_gem_retire_requests(dev);
-
-	spin_lock(&mn->lock);
-	it = interval_tree_iter_first(&mn->objects,
-				      mo->it.start, mo->it.last);
-	if (it) {
-		struct drm_i915_gem_object *obj;
-
-		/* We only need to check the first object in the range as it
-		 * either has cancelled gup work queued and we need to
-		 * return back to the user to give time for the gup-workers
-		 * to flush their object references upon which the object will
-		 * be removed from the interval-tree, or the the range is
-		 * still in use by another client and the overlap is invalid.
-		 *
-		 * If we do have an overlap, we cannot use the interval tree
-		 * for fast range invalidation.
-		 */
-
-		obj = container_of(it, struct i915_mmu_object, it)->obj;
-		if (!obj->userptr.workers)
-			mn->has_linear = mo->is_linear = true;
-		else
-			ret = -EAGAIN;
-	} else
-		interval_tree_insert(&mo->it, &mn->objects);
-
-	if (ret == 0)
-		list_add(&mo->link, &mn->linear);
-
-	spin_unlock(&mn->lock);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
-{
-	struct i915_mmu_object *mo;
-
-	list_for_each_entry(mo, &mn->linear, link)
-		if (mo->is_linear)
-			return true;
-
-	return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
-		      struct i915_mmu_object *mo)
-{
-	spin_lock(&mn->lock);
-	list_del(&mo->link);
-	if (mo->is_linear)
-		mn->has_linear = i915_mmu_notifier_has_linear(mn);
-	else
-		interval_tree_remove(&mo->it, &mn->objects);
-	spin_unlock(&mn->lock);
-}
-
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
@@ -265,7 +187,9 @@
 	if (mo == NULL)
 		return;
 
-	i915_mmu_notifier_del(mo->mn, mo);
+	spin_lock(&mo->mn->lock);
+	del_object(mo);
+	spin_unlock(&mo->mn->lock);
 	kfree(mo);
 
 	obj->userptr.mmu_object = NULL;
@@ -299,7 +223,6 @@
 {
 	struct i915_mmu_notifier *mn;
 	struct i915_mmu_object *mo;
-	int ret;
 
 	if (flags & I915_USERPTR_UNSYNCHRONIZED)
 		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
@@ -316,16 +239,10 @@
 		return -ENOMEM;
 
 	mo->mn = mn;
-	mo->it.start = obj->userptr.ptr;
-	mo->it.last = mo->it.start + obj->base.size - 1;
 	mo->obj = obj;
-	INIT_WORK(&mo->work, __cancel_userptr__worker);
-
-	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
-	if (ret) {
-		kfree(mo);
-		return ret;
-	}
+	mo->it.start = obj->userptr.ptr;
+	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
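+	/* Interval tree ranges are inclusive, hence the -1 for the last
+	 * byte of the object. */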
+	INIT_WORK(&mo->work, cancel_userptr);
 
 	obj->userptr.mmu_object = mo;
 	return 0;
@@ -552,8 +469,10 @@
 	/* In order to serialise get_pages with an outstanding
 	 * cancel_userptr, we must drop the struct_mutex and try again.
 	 */
-	if (!value || !work_pending(&obj->userptr.mmu_object->work))
-		obj->userptr.mmu_object->active = value;
+	if (!value)
+		del_object(obj->userptr.mmu_object);
+	else if (!work_pending(&obj->userptr.mmu_object->work))
+		add_object(obj->userptr.mmu_object);
 	else
 		ret = -EAGAIN;
 	spin_unlock(&obj->userptr.mmu_object->mn->lock);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 06ca408..831895b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -365,6 +365,10 @@
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+	err_printf(m, "PCI Subsystem: %04x:%04x\n",
+		   dev->pdev->subsystem_vendor,
+		   dev->pdev->subsystem_device);
 	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
 	if (HAS_CSR(dev)) {
@@ -732,7 +736,7 @@
 	struct i915_vma *vma;
 	int i = 0;
 
-	list_for_each_entry(vma, head, mm_list) {
+	list_for_each_entry(vma, head, vm_link) {
 		capture_bo(err++, vma);
 		if (++i == count)
 			break;
@@ -755,7 +759,7 @@
 		if (err == last)
 			break;
 
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				capture_bo(err++, vma);
 	}
@@ -1050,7 +1054,7 @@
 			if (request)
 				rbuf = request->ctx->engine[ring->id].ringbuf;
 			else
-				rbuf = ring->default_context->engine[ring->id].ringbuf;
+				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
 		} else
 			rbuf = ring->buffer;
 
@@ -1123,12 +1127,12 @@
 	int i;
 
 	i = 0;
-	list_for_each_entry(vma, &vm->active_list, mm_list)
+	list_for_each_entry(vma, &vm->active_list, vm_link)
 		i++;
 	error->active_bo_count[ndx] = i;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		list_for_each_entry(vma, &obj->vma_list, vma_link)
+		list_for_each_entry(vma, &obj->vma_list, obj_link)
 			if (vma->vm == vm && vma->pin_count > 0)
 				i++;
 	}
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 685c799..e4ba582 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -40,6 +40,7 @@
 #define   GS_MIA_CORE_STATE		  (1 << GS_MIA_SHIFT)
 
 #define SOFT_SCRATCH(n)			_MMIO(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT		16
 
 #define UOS_RSA_SCRATCH(i)		_MMIO(0xc200 + (i) * 4)
 #define   UOS_RSA_SCRATCH_MAX_COUNT	  64
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 05aa7e6..d7543ef 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -158,10 +158,8 @@
 
 	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
 	/* WaRsDisableCoarsePowerGating:skl,bxt */
-	if (!intel_enable_rc6(dev_priv->dev) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
-	    (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
-	    (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
+	if (!intel_enable_rc6(dev) ||
+	    NEEDS_WaRsDisableCoarsePowerGating(dev))
 		data[1] = 0;
 	else
 		/* bit 0 and 1 are for Render and Media domain separately */
@@ -246,6 +244,9 @@
 			db_exc.cookie = 1;
 	}
 
+	/* Finally, update the cached copy of the GuC's WQ head */
+	gc->wq_head = desc->head;
+
 	kunmap_atomic(base);
 	return ret;
 }
@@ -375,6 +376,8 @@
 static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_engine_cs *ring;
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
@@ -387,10 +390,8 @@
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct guc_execlist_context *lrc = &desc.lrc[i];
-		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
-		struct intel_engine_cs *ring;
+	for_each_ring(ring, dev_priv, i) {
+		struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
 		struct drm_i915_gem_object *obj;
 		uint64_t ctx_desc;
 
@@ -405,7 +406,6 @@
 		if (!obj)
 			break;	/* XXX: continue? */
 
-		ring = ringbuf->ring;
 		ctx_desc = intel_lr_context_descriptor(ctx, ring);
 		lrc->context_desc = (u32)ctx_desc;
 
@@ -413,16 +413,16 @@
 		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
 				LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-				(ring->id << GUC_ELC_ENGINE_OFFSET);
+				(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ringbuf->obj;
+		obj = ctx->engine[i].ringbuf->obj;
 
 		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
 		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
 
-		desc.engines_used |= (1 << ring->id);
+		desc.engines_used |= (1 << ring->guc_id);
 	}
 
 	WARN_ON(desc.engines_used == 0);
@@ -471,28 +471,30 @@
 			     sizeof(desc) * client->ctx_index);
 }
 
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
 	struct guc_process_desc *desc;
 	void *base;
 	u32 size = sizeof(struct guc_wq_item);
 	int ret = -ETIMEDOUT, timeout_counter = 200;
 
+	if (!gc)
+		return 0;
+
+	/* Return quickly if wq space is known to be available from the
+	 * cached copy of the head position taken at the last check. */
+	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+		return 0;
+
 	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
 	desc = base + gc->proc_desc_offset;
 
 	while (timeout_counter-- > 0) {
-		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
-			*offset = gc->wq_tail;
+		gc->wq_head = desc->head;
 
-			/* advance the tail for next workqueue item */
-			gc->wq_tail += size;
-			gc->wq_tail &= gc->wq_size - 1;
-
-			/* this will break the loop */
-			timeout_counter = 0;
+		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
 			ret = 0;
+			break;
 		}
 
 		if (timeout_counter)
@@ -507,15 +509,18 @@
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
 				  struct drm_i915_gem_request *rq)
 {
-	enum intel_ring_id ring_id = rq->ring->id;
 	struct guc_wq_item *wqi;
 	void *base;
-	u32 tail, wq_len, wq_off = 0;
-	int ret;
+	u32 tail, wq_len, wq_off, space;
 
-	ret = guc_get_workqueue_space(gc, &wq_off);
-	if (ret)
-		return ret;
+	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	if (WARN_ON(space < sizeof(struct guc_wq_item)))
+		return -ENOSPC; /* shouldn't happen */
+
+	/* postincrement WQ tail for next time */
+	wq_off = gc->wq_tail;
+	gc->wq_tail += sizeof(struct guc_wq_item);
+	gc->wq_tail &= gc->wq_size - 1;
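+	/* wq_size is a power of two, so the mask wraps the tail in place. */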
 
 	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
 	 * should not have the case where structure wqi is across page, neither
@@ -537,7 +542,7 @@
 	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
 	wqi->header = WQ_TYPE_INORDER |
 			(wq_len << WQ_LEN_SHIFT) |
-			(ring_id << WQ_TARGET_SHIFT) |
+			(rq->ring->guc_id << WQ_TARGET_SHIFT) |
 			WQ_NO_WCFLUSH_WAIT;
 
 	/* The GuC wants only the low-order word of the context descriptor */
@@ -553,29 +558,6 @@
 	return 0;
 }
 
-#define CTX_RING_BUFFER_START		0x08
-
-/* Update the ringbuffer pointer in a saved context image */
-static void lr_context_update(struct drm_i915_gem_request *rq)
-{
-	enum intel_ring_id ring_id = rq->ring->id;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
-	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
-	struct page *page;
-	uint32_t *reg_state;
-
-	BUG_ON(!ctx_obj);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
-	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
-
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
-
-	kunmap_atomic(reg_state);
-}
-
 /**
  * i915_guc_submit() - Submit commands through GuC
  * @client:	the guc client where commands will go through
@@ -587,18 +569,14 @@
 		    struct drm_i915_gem_request *rq)
 {
 	struct intel_guc *guc = client->guc;
-	enum intel_ring_id ring_id = rq->ring->id;
+	unsigned int engine_id = rq->ring->guc_id;
 	int q_ret, b_ret;
 
-	/* Need this because of the deferred pin ctx and ring */
-	/* Shall we move this right after ring is pinned? */
-	lr_context_update(rq);
-
 	q_ret = guc_add_workqueue_item(client, rq);
 	if (q_ret == 0)
 		b_ret = guc_ring_doorbell(client);
 
-	client->submissions[ring_id] += 1;
+	client->submissions[engine_id] += 1;
 	if (q_ret) {
 		client->q_fail += 1;
 		client->retcode = q_ret;
@@ -608,8 +586,8 @@
 	} else {
 		client->retcode = 0;
 	}
-	guc->submissions[ring_id] += 1;
-	guc->last_seqno[ring_id] = rq->seqno;
+	guc->submissions[engine_id] += 1;
+	guc->last_seqno[engine_id] = rq->seqno;
 
 	return q_ret;
 }
@@ -832,6 +810,96 @@
 	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
+static void init_guc_policies(struct guc_policies *policies)
+{
+	struct guc_policy *policy;
+	u32 p, i;
+
+	policies->dpc_promote_time = 500000;
+	policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+	for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+		for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+			policy = &policies->policy[p][i];
+
+			policy->execution_quantum = 1000000;
+			policy->preemption_time = 500000;
+			policy->fault_time = 250000;
+			policy->policy_flags = 0;
+		}
+	}
+
+	policies->is_valid = 1;
+}
+
+static void guc_create_ads(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_gem_object *obj;
+	struct guc_ads *ads;
+	struct guc_policies *policies;
+	struct guc_mmio_reg_state *reg_state;
+	struct intel_engine_cs *ring;
+	struct page *page;
+	u32 size, i;
+
+	/* The ads obj includes the struct itself and buffers passed to GuC */
+	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
+			sizeof(struct guc_mmio_reg_state) +
+			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+
+	obj = guc->ads_obj;
+	if (!obj) {
+		obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+		if (!obj)
+			return;
+
+		guc->ads_obj = obj;
+	}
+
+	page = i915_gem_object_get_page(obj, 0);
+	ads = kmap(page);
+
+	/*
+	 * The GuC requires a "Golden Context" when it reinitialises
+	 * engines after a reset. Here we use the Render ring default
+	 * context, which must already exist and be pinned in the GGTT,
+	 * so its address won't change after we've told the GuC where
+	 * to find it.
+	 */
+	ring = &dev_priv->ring[RCS];
+	ads->golden_context_lrca = ring->status_page.gfx_addr;
+
+	for_each_ring(ring, dev_priv, i)
+		ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+
+	/* GuC scheduling policies */
+	policies = (void *)ads + sizeof(struct guc_ads);
+	init_guc_policies(policies);
+
+	ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+			sizeof(struct guc_ads);
+
+	/* MMIO reg state */
+	reg_state = (void *)policies + sizeof(struct guc_policies);
+
+	for_each_ring(ring, dev_priv, i) {
+		reg_state->mmio_white_list[ring->guc_id].mmio_start =
+			ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+		/* Nothing to be saved or restored for now. */
+		reg_state->mmio_white_list[ring->guc_id].count = 0;
+	}
+
+	ads->reg_state_addr = ads->scheduler_policies +
+			sizeof(struct guc_policies);
+
+	ads->reg_state_buffer = ads->reg_state_addr +
+			sizeof(struct guc_mmio_reg_state);
+
+	kunmap(page);
+}
+
 /*
  * Set up the memory resources to be shared with the GuC.  At this point,
  * we require just one object that can be mapped through the GGTT.
@@ -858,6 +926,8 @@
 
 	guc_create_log(guc);
 
+	guc_create_ads(guc);
+
 	return 0;
 }
 
@@ -865,7 +935,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *ctx = dev_priv->kernel_context;
 	struct i915_guc_client *client;
 
 	/* client for execbuf submission */
@@ -896,6 +966,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
 
+	gem_release_guc_obj(dev_priv->guc.ads_obj);
+	guc->ads_obj = NULL;
+
 	gem_release_guc_obj(dev_priv->guc.log_obj);
 	guc->log_obj = NULL;
 
@@ -919,7 +992,7 @@
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
 	/* any value greater than GUC_POWER_D0 */
@@ -945,7 +1018,7 @@
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
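
A minimal sketch of the cached-head pattern that i915_guc_wq_check_space()
and guc_add_workqueue_item() rely on above: the driver's write pointer
(wq_tail) is checked against a cached copy of the GuC's read pointer
(wq_head), and the shared process descriptor is only re-read when that cheap
check fails. Names below are illustrative rather than the driver's, CIRC_SPACE
is expanded from its <linux/circ_buf.h> definition, and the real code retries
with a timeout instead of re-reading once.

	#include <stdint.h>

	/* As in <linux/circ_buf.h>: head is the writer, tail the reader,
	 * size a power of two; one slot is always left empty. */
	#define CIRC_SPACE(head, tail, size) \
		(((tail) - ((head) + 1)) & ((size) - 1))

	struct wq_ring {
		uint32_t tail;        /* driver's write pointer */
		uint32_t cached_head; /* last head read from shared memory */
		uint32_t size;        /* ring size in bytes, power of two */
	};

	/* Fast path trusts the cached head; slow path refreshes it once. */
	static int wq_has_space(struct wq_ring *ring,
				volatile const uint32_t *shared_head,
				uint32_t item_size)
	{
		if (CIRC_SPACE(ring->tail, ring->cached_head, ring->size) >= item_size)
			return 1;

		ring->cached_head = *shared_head;
		return CIRC_SPACE(ring->tail, ring->cached_head, ring->size) >= item_size;
	}
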
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fa8afa7..d1a46ef 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1651,6 +1651,12 @@
 	int pipe;
 
 	spin_lock(&dev_priv->irq_lock);
+
+	if (!dev_priv->display_irqs_enabled) {
+		spin_unlock(&dev_priv->irq_lock);
+		return;
+	}
+
 	for_each_pipe(dev_priv, pipe) {
 		i915_reg_t reg;
 		u32 mask, iir_bit = 0;
@@ -2188,10 +2194,6 @@
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	/* We get interrupts on unclaimed registers, so check for this before we
-	 * do any I915_{READ,WRITE}. */
-	intel_uncore_check_errors(dev);
-
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -2268,43 +2270,20 @@
 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
 }
 
-static irqreturn_t gen8_irq_handler(int irq, void *arg)
+static irqreturn_t
+gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 {
-	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 master_ctl;
+	struct drm_device *dev = dev_priv->dev;
 	irqreturn_t ret = IRQ_NONE;
-	uint32_t tmp = 0;
+	u32 iir;
 	enum pipe pipe;
-	u32 aux_mask = GEN8_AUX_CHANNEL_A;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return IRQ_NONE;
-
-	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
-	disable_rpm_wakeref_asserts(dev_priv);
-
-	if (INTEL_INFO(dev_priv)->gen >= 9)
-		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
-			GEN9_AUX_CHANNEL_D;
-
-	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
-	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
-	if (!master_ctl)
-		goto out;
-
-	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
-
-	/* Find, clear, then process each source of interrupt */
-
-	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
 
 	if (master_ctl & GEN8_DE_MISC_IRQ) {
-		tmp = I915_READ(GEN8_DE_MISC_IIR);
-		if (tmp) {
-			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+		iir = I915_READ(GEN8_DE_MISC_IIR);
+		if (iir) {
+			I915_WRITE(GEN8_DE_MISC_IIR, iir);
 			ret = IRQ_HANDLED;
-			if (tmp & GEN8_DE_MISC_GSE)
+			if (iir & GEN8_DE_MISC_GSE)
 				intel_opregion_asle_intr(dev);
 			else
 				DRM_ERROR("Unexpected DE Misc interrupt\n");
@@ -2314,33 +2293,40 @@
 	}
 
 	if (master_ctl & GEN8_DE_PORT_IRQ) {
-		tmp = I915_READ(GEN8_DE_PORT_IIR);
-		if (tmp) {
+		iir = I915_READ(GEN8_DE_PORT_IIR);
+		if (iir) {
+			u32 tmp_mask;
 			bool found = false;
-			u32 hotplug_trigger = 0;
 
-			if (IS_BROXTON(dev_priv))
-				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
-			else if (IS_BROADWELL(dev_priv))
-				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
-
-			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+			I915_WRITE(GEN8_DE_PORT_IIR, iir);
 			ret = IRQ_HANDLED;
 
-			if (tmp & aux_mask) {
+			tmp_mask = GEN8_AUX_CHANNEL_A;
+			if (INTEL_INFO(dev_priv)->gen >= 9)
+				tmp_mask |= GEN9_AUX_CHANNEL_B |
+					    GEN9_AUX_CHANNEL_C |
+					    GEN9_AUX_CHANNEL_D;
+
+			if (iir & tmp_mask) {
 				dp_aux_irq_handler(dev);
 				found = true;
 			}
 
-			if (hotplug_trigger) {
-				if (IS_BROXTON(dev))
-					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
-				else
-					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
-				found = true;
+			if (IS_BROXTON(dev_priv)) {
+				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
+				if (tmp_mask) {
+					bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+					found = true;
+				}
+			} else if (IS_BROADWELL(dev_priv)) {
+				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
+				if (tmp_mask) {
+					ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+					found = true;
+				}
 			}
 
-			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
+			if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
 				gmbus_irq_handler(dev);
 				found = true;
 			}
@@ -2353,49 +2339,51 @@
 	}
 
 	for_each_pipe(dev_priv, pipe) {
-		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
+		u32 flip_done, fault_errors;
 
 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
 			continue;
 
-		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
-		if (pipe_iir) {
-			ret = IRQ_HANDLED;
-			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
-
-			if (pipe_iir & GEN8_PIPE_VBLANK &&
-			    intel_pipe_handle_vblank(dev, pipe))
-				intel_check_page_flip(dev, pipe);
-
-			if (INTEL_INFO(dev_priv)->gen >= 9)
-				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
-			else
-				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
-
-			if (flip_done) {
-				intel_prepare_page_flip(dev, pipe);
-				intel_finish_page_flip_plane(dev, pipe);
-			}
-
-			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
-				hsw_pipe_crc_irq_handler(dev, pipe);
-
-			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
-				intel_cpu_fifo_underrun_irq_handler(dev_priv,
-								    pipe);
-
-
-			if (INTEL_INFO(dev_priv)->gen >= 9)
-				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
-			else
-				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-
-			if (fault_errors)
-				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
-					  pipe_name(pipe),
-					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-		} else
+		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (!iir) {
 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+			continue;
+		}
+
+		ret = IRQ_HANDLED;
+		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
+
+		if (iir & GEN8_PIPE_VBLANK &&
+		    intel_pipe_handle_vblank(dev, pipe))
+			intel_check_page_flip(dev, pipe);
+
+		flip_done = iir;
+		if (INTEL_INFO(dev_priv)->gen >= 9)
+			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
+		else
+			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
+
+		if (flip_done) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
+
+		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
+			hsw_pipe_crc_irq_handler(dev, pipe);
+
+		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
+			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+
+		fault_errors = iir;
+		if (INTEL_INFO(dev_priv)->gen >= 9)
+			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+		else
+			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+		if (fault_errors)
+			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
+				  pipe_name(pipe),
+				  fault_errors);
 	}
 
 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
@@ -2405,15 +2393,15 @@
 		 * scheme also closed the SDE interrupt handling race we've seen
 		 * on older pch-split platforms. But this needs testing.
 		 */
-		u32 pch_iir = I915_READ(SDEIIR);
-		if (pch_iir) {
-			I915_WRITE(SDEIIR, pch_iir);
+		iir = I915_READ(SDEIIR);
+		if (iir) {
+			I915_WRITE(SDEIIR, iir);
 			ret = IRQ_HANDLED;
 
 			if (HAS_PCH_SPT(dev_priv))
-				spt_irq_handler(dev, pch_iir);
+				spt_irq_handler(dev, iir);
 			else
-				cpt_irq_handler(dev, pch_iir);
+				cpt_irq_handler(dev, iir);
 		} else {
 			/*
 			 * Like on previous PCH there seems to be something
@@ -2423,10 +2411,36 @@
 		}
 	}
 
+	return ret;
+}
+
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl;
+	irqreturn_t ret;
+
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
+	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
+	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+
+	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
+	disable_rpm_wakeref_asserts(dev_priv);
+
+	/* Find, clear, then process each source of interrupt */
+	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
+
 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 	POSTING_READ_FW(GEN8_MASTER_IRQ);
 
-out:
 	enable_rpm_wakeref_asserts(dev_priv);
 
 	return ret;
@@ -2949,14 +2963,44 @@
 		ring->hangcheck.deadlock = 0;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+static bool subunits_stuck(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 tmp;
+	u32 instdone[I915_NUM_INSTDONE_REG];
+	bool stuck;
+	int i;
 
+	if (ring->id != RCS)
+		return true;
+
+	i915_get_extra_instdone(ring->dev, instdone);
+
+	/* There might be unstable subunit states even when the
+	 * actual head is not moving. Filter out the unstable ones by
+	 * accumulating the undone -> done transitions and only
+	 * consider those as progress.
+	 */
+	stuck = true;
+	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
+		const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+
+		if (tmp != ring->hangcheck.instdone[i])
+			stuck = false;
+
+		ring->hangcheck.instdone[i] |= tmp;
+	}
+
+	return stuck;
+}
+
+static enum intel_ring_hangcheck_action
+head_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
 	if (acthd != ring->hangcheck.acthd) {
+
+		/* Clear subunit states on head movement */
+		memset(ring->hangcheck.instdone, 0,
+		       sizeof(ring->hangcheck.instdone));
+
 		if (acthd > ring->hangcheck.max_acthd) {
 			ring->hangcheck.max_acthd = acthd;
 			return HANGCHECK_ACTIVE;
@@ -2965,6 +3009,24 @@
 		return HANGCHECK_ACTIVE_LOOP;
 	}
 
+	if (!subunits_stuck(ring))
+		return HANGCHECK_ACTIVE;
+
+	return HANGCHECK_HUNG;
+}
+
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_ring_hangcheck_action ha;
+	u32 tmp;
+
+	ha = head_stuck(ring, acthd);
+	if (ha != HANGCHECK_HUNG)
+		return ha;
+
 	if (IS_GEN2(dev))
 		return HANGCHECK_HUNG;
 
@@ -3032,6 +3094,12 @@
 	 */
 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 
+	/* As enabling the GPU requires fairly extensive mmio access,
+	 * periodically arm the mmio checker to see if we are triggering
+	 * any invalid access.
+	 */
+	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
 	for_each_ring(ring, dev_priv, i) {
 		u64 acthd;
 		u32 seqno;
@@ -3106,7 +3174,11 @@
 			if (ring->hangcheck.score > 0)
 				ring->hangcheck.score--;
 
+			/* Clear head and subunit states on seqno movement */
 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+
+			memset(ring->hangcheck.instdone, 0,
+			       sizeof(ring->hangcheck.instdone));
 		}
 
 		ring->hangcheck.seqno = seqno;
@@ -3277,23 +3349,30 @@
 				     unsigned int pipe_mask)
 {
 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+	enum pipe pipe;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	if (pipe_mask & 1 << PIPE_A)
-		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
-				  dev_priv->de_irq_mask[PIPE_A],
-				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
-	if (pipe_mask & 1 << PIPE_B)
-		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
-				  dev_priv->de_irq_mask[PIPE_B],
-				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
-	if (pipe_mask & 1 << PIPE_C)
-		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
-				  dev_priv->de_irq_mask[PIPE_C],
-				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+				  dev_priv->de_irq_mask[pipe],
+				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+				     unsigned int pipe_mask)
+{
+	enum pipe pipe;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/* make sure we're done processing display irqs */
+	synchronize_irq(dev_priv->dev->irq);
+}
+
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
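
A compact sketch of the progress filter introduced in subunits_stuck()
above: INSTDONE bits can flicker even while ACTHD is static, so samples are
OR-accumulated, and only a bit not seen since the accumulator was last
cleared (on head or seqno movement) counts as progress. Names here are
hypothetical; the real code keys off ring->hangcheck.instdone and
I915_NUM_INSTDONE_REG.

	#include <stdbool.h>
	#include <stdint.h>

	#define NUM_INSTDONE_REG 4 /* illustrative count */

	struct hangcheck {
		uint32_t instdone[NUM_INSTDONE_REG]; /* accumulated samples */
	};

	/* True (stuck) only if this sample adds no new bits, i.e. no
	 * subunit made an undone -> done transition since the
	 * accumulator was cleared. */
	static bool subunits_stuck(struct hangcheck *hc,
				   const uint32_t sample[NUM_INSTDONE_REG])
	{
		bool stuck = true;

		for (int i = 0; i < NUM_INSTDONE_REG; i++) {
			uint32_t merged = sample[i] | hc->instdone[i];

			if (merged != hc->instdone[i])
				stuck = false; /* fresh activity observed */

			hc->instdone[i] = merged;
		}
		return stuck;
	}
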
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 835d609..278c9c4 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -22,6 +22,7 @@
  * IN THE SOFTWARE.
  */
 
+#include "i915_params.h"
 #include "i915_drv.h"
 
 struct i915_params i915 __read_mostly = {
@@ -37,7 +38,7 @@
 	.enable_execlists = -1,
 	.enable_hangcheck = true,
 	.enable_ppgtt = -1,
-	.enable_psr = 0,
+	.enable_psr = -1,
 	.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
 	.disable_power_well = -1,
 	.enable_ips = 1,
@@ -48,7 +49,6 @@
 	.invert_brightness = 0,
 	.disable_display = 0,
 	.enable_cmd_parser = 1,
-	.disable_vtd_wa = 0,
 	.use_mmio_flip = 0,
 	.mmio_debug = 0,
 	.verbose_state_checks = 1,
@@ -91,7 +91,7 @@
 	"Enable frame buffer compression for power savings "
 	"(default: -1 (use per-chip default))");
 
-module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
 MODULE_PARM_DESC(lvds_channel_mode,
 	 "Specify LVDS channel mode "
 	 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
@@ -101,7 +101,7 @@
 	"Use Spread Spectrum Clock with panels [LVDS/eDP] "
 	"(default: auto from VBT)");
 
-module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
 MODULE_PARM_DESC(vbt_sdvo_panel_type,
 	"Override/Ignore selection of SDVO panel mode in the VBT "
 	"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
@@ -126,9 +126,11 @@
 	"(-1=auto [default], 0=disabled, 1=enabled)");
 
 module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR "
+		 "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+		 "Default: -1 (use per-chip default)");
 
-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
 MODULE_PARM_DESC(preliminary_hw_support,
 	"Enable preliminary hardware support.");
 
@@ -162,12 +164,9 @@
 	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
 	"It will then be included in an upcoming module version.");
 
-module_param_named(disable_display, i915.disable_display, bool, 0600);
+module_param_named(disable_display, i915.disable_display, bool, 0400);
 MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
 
-module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
-MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
-
 module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
 MODULE_PARM_DESC(enable_cmd_parser,
 		 "Enable command parsing (1=enabled [default], 0=disabled)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
new file mode 100644
index 0000000..bd5026b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_PARAMS_H_
+#define _I915_PARAMS_H_
+
+#include <linux/cache.h> /* for __read_mostly */
+
+struct i915_params {
+	int modeset;
+	int panel_ignore_lid;
+	int semaphores;
+	int lvds_channel_mode;
+	int panel_use_ssc;
+	int vbt_sdvo_panel_type;
+	int enable_rc6;
+	int enable_dc;
+	int enable_fbc;
+	int enable_ppgtt;
+	int enable_execlists;
+	int enable_psr;
+	unsigned int preliminary_hw_support;
+	int disable_power_well;
+	int enable_ips;
+	int invert_brightness;
+	int enable_cmd_parser;
+	int guc_log_level;
+	int use_mmio_flip;
+	int mmio_debug;
+	int edp_vswing;
+	/* leave bools at the end to not create holes */
+	bool enable_hangcheck;
+	bool fastboot;
+	bool prefault_disable;
+	bool load_detect_test;
+	bool reset;
+	bool disable_display;
+	bool enable_guc_submission;
+	bool verbose_state_checks;
+	bool nuclear_pageflip;
+};
+
+extern struct i915_params i915 __read_mostly;
+
+#endif
+
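
For readers unfamiliar with the pattern the new i915_params.h supports:
module_param_named() binds an arbitrary variable (here a struct field) to a
module parameter, which is what lets every option live in one __read_mostly
struct. A stripped-down sketch with made-up names; the 0600 -> 0400
permission changes above likewise drop runtime writability via sysfs for
options that are only sampled at load time.

	#include <linux/cache.h>	/* __read_mostly */
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	struct demo_params {
		int enable_feature;
		bool verbose;
	};

	static struct demo_params demo __read_mostly = {
		.enable_feature = -1,	/* -1 = per-chip default */
	};

	/* Exposed as /sys/module/<mod>/parameters/enable_feature;
	 * 0400 makes it read-only at runtime. */
	module_param_named(enable_feature, demo.enable_feature, int, 0400);
	MODULE_PARM_DESC(enable_feature,
			 "Enable the feature (-1=auto [default], 0=off, 1=on)");

	module_param_named(verbose, demo.verbose, bool, 0600);
	MODULE_PARM_DESC(verbose, "Verbose logging (default: false)");
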
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4897728..f76cbf3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -610,16 +610,17 @@
 #define   IOSF_BYTE_ENABLES_SHIFT		4
 #define   IOSF_BAR_SHIFT			1
 #define   IOSF_SB_BUSY				(1<<0)
-#define   IOSF_PORT_BUNIT			0x3
-#define   IOSF_PORT_PUNIT			0x4
+#define   IOSF_PORT_BUNIT			0x03
+#define   IOSF_PORT_PUNIT			0x04
 #define   IOSF_PORT_NC				0x11
 #define   IOSF_PORT_DPIO			0x12
-#define   IOSF_PORT_DPIO_2			0x1a
 #define   IOSF_PORT_GPIO_NC			0x13
 #define   IOSF_PORT_CCK				0x14
-#define   IOSF_PORT_CCU				0xA9
-#define   IOSF_PORT_GPS_CORE			0x48
-#define   IOSF_PORT_FLISDSI			0x1B
+#define   IOSF_PORT_DPIO_2			0x1a
+#define   IOSF_PORT_FLISDSI			0x1b
+#define   IOSF_PORT_GPIO_SC			0x48
+#define   IOSF_PORT_GPIO_SUS			0xa8
+#define   IOSF_PORT_CCU				0xa9
 #define VLV_IOSF_DATA				_MMIO(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR				_MMIO(VLV_DISPLAY_BASE + 0x2108)
 
@@ -1635,6 +1636,9 @@
 #define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
 #define   RING_WAIT_SEMAPHORE	(1<<10) /* gen6+ */
 
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define   RING_MAX_NONPRIV_SLOTS  12
+
 #define GEN7_TLB_RD_ADDR	_MMIO(0x4700)
 
 #if 0
@@ -1711,6 +1715,11 @@
 #define FPGA_DBG		_MMIO(0x42300)
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
 
+#define CLAIM_ER		_MMIO(VLV_DISPLAY_BASE + 0x2028)
+#define   CLAIM_ER_CLR		(1 << 31)
+#define   CLAIM_ER_OVERFLOW	(1 << 16)
+#define   CLAIM_ER_CTR_MASK	0xffff
+
 #define DERRMR		_MMIO(0x44050)
 /* Note that HBLANK events are reserved on bdw+ */
 #define   DERRMR_PIPEA_SCANLINE		(1<<0)
@@ -5941,6 +5950,7 @@
 #define  ILK_INTERNAL_GRAPHICS_DISABLE	(1 << 31)
 #define  ILK_INTERNAL_DISPLAY_DISABLE	(1 << 30)
 #define  ILK_DISPLAY_DEBUG_DISABLE	(1 << 29)
+#define  IVB_PIPE_C_DISABLE		(1 << 28)
 #define  ILK_HDCP_DISABLE		(1 << 25)
 #define  ILK_eDP_A_DISABLE		(1 << 24)
 #define  HSW_CDCLK_LIMIT		(1 << 24)
@@ -5987,10 +5997,19 @@
 #define SKL_DFSM_CDCLK_LIMIT_540	(1 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_450	(2 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_337_5	(3 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE		(1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE		(1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE		(1 << 28)
+
+#define GEN7_FF_SLICE_CS_CHICKEN1	_MMIO(0x20e0)
+#define   GEN9_FFSC_PERCTX_PREEMPT_CTRL	(1<<14)
 
 #define FF_SLICE_CS_CHICKEN2			_MMIO(0x20e4)
 #define  GEN9_TSG_BARRIER_ACK_DISABLE		(1<<8)
 
+#define GEN9_CS_DEBUG_MODE1		_MMIO(0x20ec)
+#define GEN8_CS_CHICKEN1		_MMIO(0x2580)
+
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1		_MMIO(0x7010)
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
@@ -6036,6 +6055,8 @@
 #define  HDC_FORCE_NON_COHERENT			(1<<4)
 #define  HDC_BARRIER_PERFORMANCE_DISABLE	(1<<10)
 
+#define GEN8_HDC_CHICKEN1			_MMIO(0x7304)
+
 /* GEN9 chicken */
 #define SLICE_ECO_CHICKEN0			_MMIO(0x7308)
 #define   PIXEL_MASK_CAMMING_DISABLE		(1 << 14)
@@ -6766,6 +6787,16 @@
 
 #define  VLV_PMWGICZ				_MMIO(0x1300a4)
 
+#define  RC6_LOCATION				_MMIO(0xD40)
+#define	   RC6_CTX_IN_DRAM			(1 << 0)
+#define  RC6_CTX_BASE				_MMIO(0xD48)
+#define    RC6_CTX_BASE_MASK			0xFFFFFFF0
+#define  PWRCTX_MAXCNT_RCSUNIT			_MMIO(0x2054)
+#define  PWRCTX_MAXCNT_VCSUNIT0			_MMIO(0x12054)
+#define  PWRCTX_MAXCNT_BCSUNIT			_MMIO(0x22054)
+#define  PWRCTX_MAXCNT_VECSUNIT			_MMIO(0x1A054)
+#define  PWRCTX_MAXCNT_VCSUNIT1			_MMIO(0x1C054)
+#define    IDLE_TIME_MASK			0xFFFFF
 #define  FORCEWAKE				_MMIO(0xA18C)
 #define  FORCEWAKE_VLV				_MMIO(0x1300b0)
 #define  FORCEWAKE_ACK_VLV			_MMIO(0x1300b4)
@@ -6904,6 +6935,7 @@
 #define GEN6_RPDEUC				_MMIO(0xA084)
 #define GEN6_RPDEUCSW				_MMIO(0xA088)
 #define GEN6_RC_STATE				_MMIO(0xA094)
+#define   RC6_STATE				(1 << 18)
 #define GEN6_RC1_WAKE_RATE_LIMIT		_MMIO(0xA098)
 #define GEN6_RC6_WAKE_RATE_LIMIT		_MMIO(0xA09C)
 #define GEN6_RC6pp_WAKE_RATE_LIMIT		_MMIO(0xA0A0)
@@ -7536,6 +7568,7 @@
 #define  DC_STATE_EN_UPTO_DC5_DC6_MASK   0x3
 
 #define  DC_STATE_DEBUG                  _MMIO(0x45520)
+#define  DC_STATE_DEBUG_MASK_CORES	(1<<0)
 #define  DC_STATE_DEBUG_MASK_MEMORY_UP	(1<<1)
 
 /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
@@ -8155,4 +8188,11 @@
 #define GEN9_VEBOX_MOCS(i)	_MMIO(0xcb00 + (i) * 4)	/* Video MOCS registers */
 #define GEN9_BLT_MOCS(i)	_MMIO(0xcc00 + (i) * 4)	/* Blitter MOCS registers */
 
+/* gamt regs */
+#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
+#define   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW  0x67F1427F /* max/min for LRA1/2 */
+#define   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV  0x5FF101FF /* max/min for LRA1/2 */
+#define   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL  0x67F1427F /*    "        " */
+#define   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT  0x5FF101FF /*    "        " */
+
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a8af594..34e061a 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -92,7 +92,7 @@
 	}
 
 	/* only restore FBC info on the platform that supports FBC */
-	intel_fbc_disable(dev_priv);
+	intel_fbc_global_disable(dev_priv);
 
 	/* restore FBC interval */
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 37e3f0d..c6188dd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -164,7 +164,7 @@
 	     struct bin_attribute *attr, char *buf,
 	     loff_t offset, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -200,7 +200,7 @@
 	      struct bin_attribute *attr, char *buf,
 	      loff_t offset, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -521,7 +521,7 @@
 				loff_t off, size_t count)
 {
 
-	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct device *kdev = kobj_to_dev(kobj);
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct i915_error_state_file_priv error_priv;
@@ -556,7 +556,7 @@
 				 struct bin_attribute *attr, char *buf,
 				 loff_t off, size_t count)
 {
-	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct device *kdev = kobj_to_dev(kobj);
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	int ret;
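
The sysfs hunks above just swap open-coded container_of(kobj, struct
device, kobj) for the kobj_to_dev() helper from <linux/device.h>. For
reference, container_of() recovers the enclosing structure from a pointer
to one of its members; a sketch of the idiom with mock types and a
simplified macro (the kernel's version adds type checking):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kobject { int refcount; };	/* mock */

	struct device {				/* mock */
		const char *name;
		struct kobject kobj;		/* embedded member */
	};

	static inline struct device *kobj_to_dev(struct kobject *kobj)
	{
		return container_of(kobj, struct device, kobj);
	}
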
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 52b2d40..fa09e55 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -175,35 +175,24 @@
 		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
 );
 
-#define VM_TO_TRACE_NAME(vm) \
-	(i915_is_ggtt(vm) ? "G" : \
-		      "P")
-
-DECLARE_EVENT_CLASS(i915_va,
-	TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
-	TP_ARGS(vm, start, length, name),
+TRACE_EVENT(i915_va_alloc,
+	TP_PROTO(struct i915_vma *vma),
+	TP_ARGS(vma),
 
 	TP_STRUCT__entry(
 		__field(struct i915_address_space *, vm)
 		__field(u64, start)
 		__field(u64, end)
-		__string(name, name)
 	),
 
 	TP_fast_assign(
-		__entry->vm = vm;
-		__entry->start = start;
-		__entry->end = start + length - 1;
-		__assign_str(name, name);
+		__entry->vm = vma->vm;
+		__entry->start = vma->node.start;
+		__entry->end = vma->node.start + vma->node.size - 1;
 	),
 
-	TP_printk("vm=%p (%s), 0x%llx-0x%llx",
-		  __entry->vm, __get_str(name),  __entry->start, __entry->end)
-);
-
-DEFINE_EVENT(i915_va, i915_va_alloc,
-	     TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
-	     TP_ARGS(vm, start, length, name)
+	TP_printk("vm=%p (%c), 0x%llx-0x%llx",
+		  __entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P',  __entry->start, __entry->end)
 );
 
 DECLARE_EVENT_CLASS(i915_px_entry,
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index d0b1c9a..8e579a8 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -97,6 +97,7 @@
 	crtc_state->disable_lp_wm = false;
 	crtc_state->disable_cxsr = false;
 	crtc_state->wm_changed = false;
+	crtc_state->fb_changed = false;
 
 	return &crtc_state->base;
 }
@@ -308,5 +309,5 @@
 {
 	struct intel_atomic_state *state = to_intel_atomic_state(s);
 	drm_atomic_state_default_clear(&state->base);
-	state->dpll_set = false;
+	state->dpll_set = state->modeset = false;
 }
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index c6bb0fc..e0b851a 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -152,9 +152,9 @@
 	intel_state->clip.x1 = 0;
 	intel_state->clip.y1 = 0;
 	intel_state->clip.x2 =
-		crtc_state->base.active ? crtc_state->pipe_src_w : 0;
+		crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
 	intel_state->clip.y2 =
-		crtc_state->base.active ? crtc_state->pipe_src_h : 0;
+		crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
 
 	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
 		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -194,8 +194,16 @@
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct intel_plane_state *intel_state =
 		to_intel_plane_state(plane->state);
+	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+	struct drm_crtc_state *crtc_state =
+		drm_atomic_get_existing_crtc_state(old_state->state, crtc);
 
-	intel_plane->commit_plane(plane, intel_state);
+	if (intel_state->visible)
+		intel_plane->update_plane(plane,
+					  to_intel_crtc_state(crtc_state),
+					  intel_state);
+	else
+		intel_plane->disable_plane(plane, crtc);
 }
 
 const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index eba3e0f..bf62a19 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -31,11 +31,49 @@
 #include "i915_drv.h"
 #include "intel_bios.h"
 
+/**
+ * DOC: Video BIOS Table (VBT)
+ *
+ * The Video BIOS Table, or VBT, provides platform and board specific
+ * configuration information to the driver that is not discoverable or available
+ * through other means. The configuration is mostly related to display
+ * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
+ * the PCI ROM.
+ *
+ * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
+ * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
+ * contain the actual configuration information. The VBT Header, and thus the
+ * VBT, begins with the "$VBT" signature. The VBT Header contains the offset
+ * of the BDB Header. The data blocks are concatenated after the BDB Header.
+ * The data blocks have a 1-byte Block ID, a 2-byte Block Size, and Block Size
+ * bytes of data. (Block 53, the MIPI Sequence Block, is an exception.)
+ *
+ * The driver parses the VBT during load. The relevant information is stored in
+ * driver private data for ease of use, and the actual VBT is not read after
+ * that.
+ */
+
 #define	SLAVE_ADDR1	0x70
 #define	SLAVE_ADDR2	0x72
 
 static int panel_type;
 
+/* Get BDB block size given a pointer to Block ID. */
+static u32 _get_blocksize(const u8 *block_base)
+{
+	/* The MIPI Sequence Block v3+ has a separate size field. */
+	if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
+		return *((const u32 *)(block_base + 4));
+	else
+		return *((const u16 *)(block_base + 1));
+}
+
+/* Get BDB block size given a pointer to data after Block ID and Block Size. */
+static u32 get_blocksize(const void *block_data)
+{
+	return _get_blocksize(block_data - 3);
+}
+
 static const void *
 find_section(const void *_bdb, int section_id)
 {
@@ -52,14 +90,8 @@
 	/* walk the sections looking for section_id */
 	while (index + 3 < total) {
 		current_id = *(base + index);
-		index++;
-
-		current_size = *((const u16 *)(base + index));
-		index += 2;
-
-		/* The MIPI Sequence Block v3+ has a separate size field. */
-		if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
-			current_size = *((const u32 *)(base + index + 1));
+		current_size = _get_blocksize(base + index);
+		index += 3;
 
 		if (index + current_size > total)
 			return NULL;
@@ -73,16 +105,6 @@
 	return NULL;
 }
 
-static u16
-get_blocksize(const void *p)
-{
-	u16 *block_ptr, block_size;
-
-	block_ptr = (u16 *)((char *)p - 2);
-	block_size = *block_ptr;
-	return block_size;
-}
-
 static void
 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
 			const struct lvds_dvo_timing *dvo_timing)
@@ -675,84 +697,13 @@
 	dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
 }
 
-static u8 *goto_next_sequence(u8 *data, int *size)
-{
-	u16 len;
-	int tmp = *size;
-
-	if (--tmp < 0)
-		return NULL;
-
-	/* goto first element */
-	data++;
-	while (1) {
-		switch (*data) {
-		case MIPI_SEQ_ELEM_SEND_PKT:
-			/*
-			 * skip by this element payload size
-			 * skip elem id, command flag and data type
-			 */
-			tmp -= 5;
-			if (tmp < 0)
-				return NULL;
-
-			data += 3;
-			len = *((u16 *)data);
-
-			tmp -= len;
-			if (tmp < 0)
-				return NULL;
-
-			/* skip by len */
-			data = data + 2 + len;
-			break;
-		case MIPI_SEQ_ELEM_DELAY:
-			/* skip by elem id, and delay is 4 bytes */
-			tmp -= 5;
-			if (tmp < 0)
-				return NULL;
-
-			data += 5;
-			break;
-		case MIPI_SEQ_ELEM_GPIO:
-			tmp -= 3;
-			if (tmp < 0)
-				return NULL;
-
-			data += 3;
-			break;
-		default:
-			DRM_ERROR("Unknown element\n");
-			return NULL;
-		}
-
-		/* end of sequence ? */
-		if (*data == 0)
-			break;
-	}
-
-	/* goto next sequence or end of block byte */
-	if (--tmp < 0)
-		return NULL;
-
-	data++;
-
-	/* update amount of data left for the sequence block to be parsed */
-	*size = tmp;
-	return data;
-}
-
 static void
-parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_mipi_config(struct drm_i915_private *dev_priv,
+		  const struct bdb_header *bdb)
 {
 	const struct bdb_mipi_config *start;
-	const struct bdb_mipi_sequence *sequence;
 	const struct mipi_config *config;
 	const struct mipi_pps_data *pps;
-	u8 *data;
-	const u8 *seq_data;
-	int i, panel_id, seq_size;
-	u16 block_size;
 
 	/* parse MIPI blocks only if LFP type is MIPI */
 	if (!dev_priv->vbt.has_mipi)
@@ -798,8 +749,178 @@
 
 	/* We have mandatory mipi config blocks. Initialize as generic panel */
 	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+}
 
-	/* Check if we have sequence block as well */
+/* Find the sequence block and size for the given panel. */
+static const u8 *
+find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
+			  u16 panel_id, u32 *seq_size)
+{
+	u32 total = get_blocksize(sequence);
+	const u8 *data = &sequence->data[0];
+	u8 current_id;
+	u32 current_size;
+	int header_size = sequence->version >= 3 ? 5 : 3;
+	int index = 0;
+	int i;
+
+	/* skip new block size */
+	if (sequence->version >= 3)
+		data += 4;
+
+	for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
+		if (index + header_size > total) {
+			DRM_ERROR("Invalid sequence block (header)\n");
+			return NULL;
+		}
+
+		current_id = *(data + index);
+		if (sequence->version >= 3)
+			current_size = *((const u32 *)(data + index + 1));
+		else
+			current_size = *((const u16 *)(data + index + 1));
+
+		index += header_size;
+
+		if (index + current_size > total) {
+			DRM_ERROR("Invalid sequence block\n");
+			return NULL;
+		}
+
+		if (current_id == panel_id) {
+			*seq_size = current_size;
+			return data + index;
+		}
+
+		index += current_size;
+	}
+
+	DRM_ERROR("Sequence block detected but no valid configuration\n");
+
+	return NULL;
+}
+
+static int goto_next_sequence(const u8 *data, int index, int total)
+{
+	u16 len;
+
+	/* Skip Sequence Byte. */
+	for (index = index + 1; index < total; index += len) {
+		u8 operation_byte = *(data + index);
+		index++;
+
+		switch (operation_byte) {
+		case MIPI_SEQ_ELEM_END:
+			return index;
+		case MIPI_SEQ_ELEM_SEND_PKT:
+			if (index + 4 > total)
+				return 0;
+
+			len = *((const u16 *)(data + index + 2)) + 4;
+			break;
+		case MIPI_SEQ_ELEM_DELAY:
+			len = 4;
+			break;
+		case MIPI_SEQ_ELEM_GPIO:
+			len = 2;
+			break;
+		case MIPI_SEQ_ELEM_I2C:
+			if (index + 7 > total)
+				return 0;
+			len = *(data + index + 6) + 7;
+			break;
+		default:
+			DRM_ERROR("Unknown operation byte\n");
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+static int goto_next_sequence_v3(const u8 *data, int index, int total)
+{
+	int seq_end;
+	u16 len;
+	u32 size_of_sequence;
+
+	/*
+	 * Could skip sequence based on Size of Sequence alone, but also do some
+	 * checking on the structure.
+	 */
+	if (total < 5) {
+		DRM_ERROR("Too small sequence size\n");
+		return 0;
+	}
+
+	/* Skip Sequence Byte. */
+	index++;
+
+	/*
+	 * Size of Sequence. Excludes the Sequence Byte and the size itself,
+	 * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
+	 * byte.
+	 */
+	size_of_sequence = *((const uint32_t *)(data + index));
+	index += 4;
+
+	seq_end = index + size_of_sequence;
+	if (seq_end > total) {
+		DRM_ERROR("Invalid sequence size\n");
+		return 0;
+	}
+
+	for (; index < total; index += len) {
+		u8 operation_byte = *(data + index);
+		index++;
+
+		if (operation_byte == MIPI_SEQ_ELEM_END) {
+			if (index != seq_end) {
+				DRM_ERROR("Invalid element structure\n");
+				return 0;
+			}
+			return index;
+		}
+
+		len = *(data + index);
+		index++;
+
+		/*
+		 * FIXME: Would be nice to check elements like for v1/v2 in
+		 * goto_next_sequence() above.
+		 */
+		switch (operation_byte) {
+		case MIPI_SEQ_ELEM_SEND_PKT:
+		case MIPI_SEQ_ELEM_DELAY:
+		case MIPI_SEQ_ELEM_GPIO:
+		case MIPI_SEQ_ELEM_I2C:
+		case MIPI_SEQ_ELEM_SPI:
+		case MIPI_SEQ_ELEM_PMIC:
+			break;
+		default:
+			DRM_ERROR("Unknown operation byte %u\n",
+				  operation_byte);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void
+parse_mipi_sequence(struct drm_i915_private *dev_priv,
+		    const struct bdb_header *bdb)
+{
+	const struct bdb_mipi_sequence *sequence;
+	const u8 *seq_data;
+	u32 seq_size;
+	u8 *data;
+	int index = 0;
+
+	/* Only our generic panel driver uses the sequence block. */
+	if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+		return;
+
 	sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
 	if (!sequence) {
 		DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
@@ -807,95 +928,54 @@
 	}
 
 	/* Fail gracefully for forward incompatible sequence block. */
-	if (sequence->version >= 3) {
-		DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+	if (sequence->version >= 4) {
+		DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
+			  sequence->version);
 		return;
 	}
 
-	DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+	DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
 
-	block_size = get_blocksize(sequence);
+	seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+	if (!seq_data)
+		return;
 
-	/*
-	 * parse the sequence block for individual sequences
-	 */
-	dev_priv->vbt.dsi.seq_version = sequence->version;
+	data = kmemdup(seq_data, seq_size, GFP_KERNEL);
+	if (!data)
+		return;
 
-	seq_data = &sequence->data[0];
-
-	/*
-	 * sequence block is variable length and hence we need to parse and
-	 * get the sequence data for specific panel id
-	 */
-	for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
-		panel_id = *seq_data;
-		seq_size = *((u16 *) (seq_data + 1));
-		if (panel_id == panel_type)
+	/* Parse the sequences, store pointers to each sequence. */
+	for (;;) {
+		u8 seq_id = *(data + index);
+		if (seq_id == MIPI_SEQ_END)
 			break;
 
-		/* skip the sequence including seq header of 3 bytes */
-		seq_data = seq_data + 3 + seq_size;
-		if ((seq_data - &sequence->data[0]) > block_size) {
-			DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
-			return;
+		if (seq_id >= MIPI_SEQ_MAX) {
+			DRM_ERROR("Unknown sequence %u\n", seq_id);
+			goto err;
+		}
+
+		dev_priv->vbt.dsi.sequence[seq_id] = data + index;
+
+		if (sequence->version >= 3)
+			index = goto_next_sequence_v3(data, index, seq_size);
+		else
+			index = goto_next_sequence(data, index, seq_size);
+		if (!index) {
+			DRM_ERROR("Invalid sequence %u\n", seq_id);
+			goto err;
 		}
 	}
 
-	if (i == MAX_MIPI_CONFIGURATIONS) {
-		DRM_ERROR("Sequence block detected but no valid configuration\n");
-		return;
-	}
-
-	/* check if found sequence is completely within the sequence block
-	 * just being paranoid */
-	if (seq_size > block_size) {
-		DRM_ERROR("Corrupted sequence/size, bailing out\n");
-		return;
-	}
-
-	/* skip the panel id(1 byte) and seq size(2 bytes) */
-	dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
-	if (!dev_priv->vbt.dsi.data)
-		return;
-
-	/*
-	 * loop into the sequence data and split into multiple sequneces
-	 * There are only 5 types of sequences as of now
-	 */
-	data = dev_priv->vbt.dsi.data;
+	dev_priv->vbt.dsi.data = data;
 	dev_priv->vbt.dsi.size = seq_size;
+	dev_priv->vbt.dsi.seq_version = sequence->version;
 
-	/* two consecutive 0x00 indicate end of all sequences */
-	while (1) {
-		int seq_id = *data;
-		if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
-			dev_priv->vbt.dsi.sequence[seq_id] = data;
-			DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
-		} else {
-			DRM_ERROR("undefined sequence\n");
-			goto err;
-		}
-
-		/* partial parsing to skip elements */
-		data = goto_next_sequence(data, &seq_size);
-
-		if (data == NULL) {
-			DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
-			goto err;
-		}
-
-		if (*data == 0)
-			break; /* end of sequence reached */
-	}
-
-	DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+	DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
 	return;
-err:
-	kfree(dev_priv->vbt.dsi.data);
-	dev_priv->vbt.dsi.data = NULL;
 
-	/* error during parsing so set all pointers to null
-	 * because of partial parsing */
+err:
+	kfree(data);
 	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
 }
 
@@ -1088,7 +1168,12 @@
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (bdb->version < 195) {
+	if (bdb->version < 106) {
+		expected_size = 22;
+	} else if (bdb->version < 109) {
+		expected_size = 27;
+	} else if (bdb->version < 195) {
+		BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
 		expected_size = sizeof(struct old_child_dev_config);
 	} else if (bdb->version == 195) {
 		expected_size = 37;
@@ -1101,18 +1186,18 @@
 				 bdb->version, expected_size);
 	}
 
-	/* The legacy sized child device config is the minimum we need. */
-	if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
-		DRM_ERROR("Child device config size %u is too small.\n",
-			  p_defs->child_dev_size);
-		return;
-	}
-
 	/* Flag an error for unexpected size, but continue anyway. */
 	if (p_defs->child_dev_size != expected_size)
 		DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
 			  p_defs->child_dev_size, expected_size, bdb->version);
 
+	/* The legacy sized child device config is the minimum we need. */
+	if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+		DRM_DEBUG_KMS("Child device config size %u is too small.\n",
+			      p_defs->child_dev_size);
+		return;
+	}
+
 	/* get the block size of general definitions */
 	block_size = get_blocksize(p_defs);
 	/* get the number of child device */
@@ -1285,7 +1370,7 @@
 
 /**
  * intel_bios_init - find VBT and initialize settings from the BIOS
- * @dev: DRM device
+ * @dev_priv: i915 device instance
  *
  * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
  * to appropriate values.
@@ -1337,7 +1422,8 @@
 	parse_driver_features(dev_priv, bdb);
 	parse_edp(dev_priv, bdb);
 	parse_psr(dev_priv, bdb);
-	parse_mipi(dev_priv, bdb);
+	parse_mipi_config(dev_priv, bdb);
+	parse_mipi_sequence(dev_priv, bdb);
 	parse_ddi_ports(dev_priv, bdb);
 
 	if (bios)
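
The DOC comment and the find_section()/_get_blocksize() rework above fix
the BDB layout in one place: each block is a 1-byte Block ID, a 2-byte
Block Size, then payload, except the MIPI Sequence Block v3+, which carries
a separate 4-byte size after its version byte. A minimal walker under those
rules, illustrative rather than the driver's code (bounds handling is
simplified much as in find_section() itself, and the 32-bit size read
assumes little-endian, as on the platforms the VBT targets):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define BDB_MIPI_SEQUENCE 53

	/* Return a pointer to the payload of the block with the given id,
	 * or NULL if it is absent or truncated. */
	static const uint8_t *find_block(const uint8_t *data, size_t total,
					 uint8_t id, uint32_t *size_out)
	{
		size_t index = 0;

		while (index + 3 < total) {
			uint8_t cur_id = data[index];
			uint32_t cur_size;

			/* MIPI Sequence Block v3+: 32-bit size after the
			 * version byte. */
			if (cur_id == BDB_MIPI_SEQUENCE && data[index + 3] >= 3)
				memcpy(&cur_size, &data[index + 4], 4);
			else
				cur_size = data[index + 1] | (data[index + 2] << 8);

			index += 3;
			if (index + cur_size > total)
				return NULL;	/* corrupt block */

			if (cur_id == id) {
				*size_out = cur_size;
				return &data[index];
			}
			index += cur_size;
		}
		return NULL;
	}
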
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 54eac10..350d4e0 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -25,25 +25,43 @@
  *
  */
 
-#ifndef _I830_BIOS_H_
-#define _I830_BIOS_H_
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
 
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature:		VBT signature, always starts with "$VBT"
+ * @version:		Version of this structure
+ * @header_size:	Size of this structure
+ * @vbt_size:		Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum:	Checksum
+ * @reserved0:		Reserved
+ * @bdb_offset:		Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset:		Offsets of add-in data blocks from beginning of VBT
+ */
 struct vbt_header {
-	u8 signature[20];		/**< Always starts with 'VBT$' */
-	u16 version;			/**< decimal */
-	u16 header_size;		/**< in bytes */
-	u16 vbt_size;			/**< in bytes */
+	u8 signature[20];
+	u16 version;
+	u16 header_size;
+	u16 vbt_size;
 	u8 vbt_checksum;
 	u8 reserved0;
-	u32 bdb_offset;			/**< from beginning of VBT */
-	u32 aim_offset[4];		/**< from beginning of VBT */
+	u32 bdb_offset;
+	u32 aim_offset[4];
 } __packed;
 
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature:		BDB signature "BIOS_DATA_BLOCK"
+ * @version:		Version of the data block definitions
+ * @header_size:	Size of this structure
+ * @bdb_size:		Size of BDB (BDB Header and data blocks)
+ */
 struct bdb_header {
-	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
-	u16 version;			/**< decimal */
-	u16 header_size;		/**< in bytes */
-	u16 bdb_size;			/**< in bytes */
+	u8 signature[16];
+	u16 version;
+	u16 header_size;
+	u16 bdb_size;
 } __packed;
 
 /* strictly speaking, this is a "skip" block, but it has interesting info */
@@ -936,21 +954,29 @@
 
 /* MIPI Sequence Block definitions */
 enum mipi_seq {
-	MIPI_SEQ_UNDEFINED = 0,
+	MIPI_SEQ_END = 0,
 	MIPI_SEQ_ASSERT_RESET,
 	MIPI_SEQ_INIT_OTP,
 	MIPI_SEQ_DISPLAY_ON,
 	MIPI_SEQ_DISPLAY_OFF,
 	MIPI_SEQ_DEASSERT_RESET,
+	MIPI_SEQ_BACKLIGHT_ON,		/* sequence block v2+ */
+	MIPI_SEQ_BACKLIGHT_OFF,		/* sequence block v2+ */
+	MIPI_SEQ_TEAR_ON,		/* sequence block v2+ */
+	MIPI_SEQ_TEAR_OFF,		/* sequence block v3+ */
+	MIPI_SEQ_POWER_ON,		/* sequence block v3+ */
+	MIPI_SEQ_POWER_OFF,		/* sequence block v3+ */
 	MIPI_SEQ_MAX
 };
 
 enum mipi_seq_element {
-	MIPI_SEQ_ELEM_UNDEFINED = 0,
+	MIPI_SEQ_ELEM_END = 0,
 	MIPI_SEQ_ELEM_SEND_PKT,
 	MIPI_SEQ_ELEM_DELAY,
 	MIPI_SEQ_ELEM_GPIO,
-	MIPI_SEQ_ELEM_STATUS,
+	MIPI_SEQ_ELEM_I2C,		/* sequence block v2+ */
+	MIPI_SEQ_ELEM_SPI,		/* sequence block v3+ */
+	MIPI_SEQ_ELEM_PMIC,		/* sequence block v3+ */
 	MIPI_SEQ_ELEM_MAX
 };
 
@@ -965,4 +991,4 @@
 	MIPI_GPIO_MAX
 };
 
-#endif /* _I830_BIOS_H_ */
+#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index a7b4a524..505fc5c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -213,9 +213,7 @@
 
 static void intel_enable_crt(struct intel_encoder *encoder)
 {
-	struct intel_crt *crt = intel_encoder_to_crt(encoder);
-
-	intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
 }
 
 static enum drm_mode_status
@@ -223,6 +221,7 @@
 		     struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
+	int max_dotclk = to_i915(dev)->max_dotclk_freq;
 
 	int max_clock = 0;
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -238,6 +237,9 @@
 	if (mode->clock > max_clock)
 		return MODE_CLOCK_HIGH;
 
+	if (mode->clock > max_dotclk)
+		return MODE_CLOCK_HIGH;
+
 	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
 	if (HAS_PCH_LPT(dev) &&
 	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
@@ -476,11 +478,10 @@
 }
 
 static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt)
+intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
 {
 	struct drm_device *dev = crt->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
 	uint32_t save_bclrpat;
 	uint32_t save_vtotal;
 	uint32_t vtotal, vactive;
@@ -649,7 +650,8 @@
 		if (intel_crt_detect_ddc(connector))
 			status = connector_status_connected;
 		else if (INTEL_INFO(dev)->gen < 4)
-			status = intel_crt_load_detect(crt);
+			status = intel_crt_load_detect(crt,
+				to_intel_crtc(connector->state->crtc)->pipe);
 		else
 			status = connector_status_unknown;
 		intel_release_load_detect_pipe(connector, &tmp, &ctx);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 647d85e..902054e 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -44,6 +44,8 @@
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
 #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
 
+#define FIRMWARE_URL  "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
+
 MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);
 
@@ -177,7 +179,8 @@
 static const struct stepping_info skl_stepping_info[] = {
 	{'A', '0'}, {'B', '0'}, {'C', '0'},
 	{'D', '0'}, {'E', '0'}, {'F', '0'},
-	{'G', '0'}, {'H', '0'}, {'I', '0'}
+	{'G', '0'}, {'H', '0'}, {'I', '0'},
+	{'J', '0'}, {'K', '0'}
 };
 
 static const struct stepping_info bxt_stepping_info[] = {
@@ -217,19 +220,19 @@
  * Every time the display comes back from a low power state, this function is
  * called to copy the firmware from internal memory to registers.
  */
-void intel_csr_load_program(struct drm_i915_private *dev_priv)
+bool intel_csr_load_program(struct drm_i915_private *dev_priv)
 {
 	u32 *payload = dev_priv->csr.dmc_payload;
 	uint32_t i, fw_size;
 
 	if (!IS_GEN9(dev_priv)) {
 		DRM_ERROR("No CSR support available for this platform\n");
-		return;
+		return false;
 	}
 
 	if (!dev_priv->csr.dmc_payload) {
 		DRM_ERROR("Tried to program CSR with empty payload\n");
-		return;
+		return false;
 	}
 
 	fw_size = dev_priv->csr.dmc_fw_size;
@@ -242,6 +245,8 @@
 	}
 
 	dev_priv->csr.dc_state = 0;
+
+	return true;
 }
 
 static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
@@ -280,10 +285,11 @@
 
 	csr->version = css_header->version;
 
-	if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
+	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+	    csr->version < SKL_CSR_VERSION_REQUIRED) {
 		DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
 			 " please upgrade to v%u.%u or later"
-			 " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
+			   " [" FIRMWARE_URL "].\n",
 			 CSR_VERSION_MAJOR(csr->version),
 			 CSR_VERSION_MINOR(csr->version),
 			 CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
@@ -401,7 +407,10 @@
 			 CSR_VERSION_MAJOR(csr->version),
 			 CSR_VERSION_MINOR(csr->version));
 	} else {
-		DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
+		dev_notice(dev_priv->dev->dev,
+			   "Failed to load DMC firmware"
+			   " [" FIRMWARE_URL "],"
+			   " disabling runtime power management.\n");
 	}
 
 	release_firmware(fw);
@@ -423,7 +432,7 @@
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	if (IS_SKYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		csr->fw_path = I915_CSR_SKL;
 	else if (IS_BROXTON(dev_priv))
 		csr->fw_path = I915_CSR_BXT;
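
The firmware refusal path above compares packed version words. Assuming the
CSR_VERSION_* convention used elsewhere in i915 (major in the high 16 bits,
minor in the low 16, so a plain integer compare orders versions correctly;
reproduced here as an assumption), the check behaves like this
self-contained sketch:

	#include <stdint.h>
	#include <stdio.h>

	#define CSR_VERSION(major, minor)	(((major) << 16) | (minor))
	#define CSR_VERSION_MAJOR(version)	((version) >> 16)
	#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

	int main(void)
	{
		uint32_t required = CSR_VERSION(1, 23);
		uint32_t found = CSR_VERSION(1, 19);

		if (found < required)
			printf("Refusing DMC firmware v%u.%u, need v%u.%u or later\n",
			       CSR_VERSION_MAJOR(found), CSR_VERSION_MINOR(found),
			       CSR_VERSION_MAJOR(required), CSR_VERSION_MINOR(required));
		return 0;
	}
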
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 084d558..62de9f4 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -133,38 +133,38 @@
 	{ 0x00002016, 0x000000A0, 0x0 },
 	{ 0x00005012, 0x0000009B, 0x0 },
 	{ 0x00007011, 0x00000088, 0x0 },
-	{ 0x80009010, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80009010, 0x000000C0, 0x1 },
 	{ 0x00002016, 0x0000009B, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80007011, 0x000000C0, 0x1 },
 	{ 0x00002016, 0x000000DF, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80005012, 0x000000C0, 0x1 },
 };
 
 /* Skylake U */
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 	{ 0x0000201B, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x00007011, 0x00000087, 0x0 },
-	{ 0x80009010, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80009010, 0x000000C0, 0x1 },
 	{ 0x0000201B, 0x0000009D, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
-	{ 0x80007011, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80005012, 0x000000C0, 0x1 },
+	{ 0x80007011, 0x000000C0, 0x1 },
 	{ 0x00002016, 0x00000088, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x80005012, 0x000000C0, 0x1 },
 };
 
 /* Skylake Y */
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x00000018, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x00007011, 0x00000087, 0x0 },
-	{ 0x80009010, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80009010, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x0000009D, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
-	{ 0x80007011, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80005012, 0x000000C0, 0x3 },
+	{ 0x80007011, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x00000088, 0x0 },
-	{ 0x80005012, 0x000000C0, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x80005012, 0x000000C0, 0x3 },
 };
 
 /*
@@ -226,26 +226,26 @@
 	{ 0x00000018, 0x000000A1, 0x0 },
 	{ 0x00000018, 0x00000098, 0x0 },
 	{ 0x00004013, 0x00000088, 0x0 },
-	{ 0x00006012, 0x00000087, 0x0 },
+	{ 0x80006012, 0x000000CD, 0x1 },
 	{ 0x00000018, 0x000000DF, 0x0 },
-	{ 0x00003015, 0x00000087, 0x0 },	/* Default */
-	{ 0x00003015, 0x000000C7, 0x0 },
-	{ 0x00000018, 0x000000C7, 0x0 },
+	{ 0x80003015, 0x000000CD, 0x1 },	/* Default */
+	{ 0x80003015, 0x000000C0, 0x1 },
+	{ 0x80000018, 0x000000C0, 0x1 },
 };
 
 /* Skylake Y */
 static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
 	{ 0x00000018, 0x000000A1, 0x0 },
 	{ 0x00005012, 0x000000DF, 0x0 },
-	{ 0x00007011, 0x00000084, 0x0 },
+	{ 0x80007011, 0x000000CB, 0x3 },
 	{ 0x00000018, 0x000000A4, 0x0 },
 	{ 0x00000018, 0x0000009D, 0x0 },
 	{ 0x00004013, 0x00000080, 0x0 },
-	{ 0x00006013, 0x000000C7, 0x0 },
+	{ 0x80006013, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x0000008A, 0x0 },
-	{ 0x00003015, 0x000000C7, 0x0 },	/* Default */
-	{ 0x80003015, 0x000000C7, 0x7 },	/* Uses I_boost level 0x7 */
-	{ 0x00000018, 0x000000C7, 0x0 },
+	{ 0x80003015, 0x000000C0, 0x3 },	/* Default */
+	{ 0x80003015, 0x000000C0, 0x3 },
+	{ 0x80000018, 0x000000C0, 0x3 },
 };
 
 struct bxt_ddi_buf_trans {
@@ -301,8 +301,8 @@
 	{ 154, 0x9A, 1, 128, true },	/* 9:	1200		0   */
 };
 
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-				    enum port port, int type);
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
+				    u32 level, enum port port, int type);
 
 static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
 				 struct intel_digital_port **dig_port,
@@ -342,81 +342,50 @@
 	return port;
 }
 
-static bool
-intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
-}
-
-static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
-							int *n_entries)
-{
-	const struct ddi_buf_trans *ddi_translations;
-
-	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-		ddi_translations = skl_y_ddi_translations_dp;
+	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
-	} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
-		ddi_translations = skl_u_ddi_translations_dp;
+		return skl_y_ddi_translations_dp;
+	} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
 		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+		return skl_u_ddi_translations_dp;
 	} else {
-		ddi_translations = skl_ddi_translations_dp;
 		*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		return skl_ddi_translations_dp;
 	}
-
-	return ddi_translations;
-}
-
-static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
-							 int *n_entries)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const struct ddi_buf_trans *ddi_translations;
-
-	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations = skl_y_ddi_translations_edp;
-			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
-		} else {
-			ddi_translations = skl_y_ddi_translations_dp;
-			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
-		}
-	} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations = skl_u_ddi_translations_edp;
-			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
-		} else {
-			ddi_translations = skl_u_ddi_translations_dp;
-			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
-		}
-	} else {
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations = skl_ddi_translations_edp;
-			*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
-		} else {
-			ddi_translations = skl_ddi_translations_dp;
-			*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-		}
-	}
-
-	return ddi_translations;
 }
 
 static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_device *dev,
-		       int *n_entries)
+skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	const struct ddi_buf_trans *ddi_translations;
-
-	if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-		ddi_translations = skl_y_ddi_translations_hdmi;
-		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
-	} else {
-		ddi_translations = skl_ddi_translations_hdmi;
-		*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+	if (dev_priv->edp_low_vswing) {
+		if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
+			return skl_y_ddi_translations_edp;
+		} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+			return skl_u_ddi_translations_edp;
+		} else {
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+			return skl_ddi_translations_edp;
+		}
 	}
 
-	return ddi_translations;
+	return skl_get_buf_trans_dp(dev_priv, n_entries);
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+{
+	if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+		return skl_y_ddi_translations_hdmi;
+	} else {
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+		return skl_ddi_translations_hdmi;
+	}
 }
 
 /*
@@ -426,42 +395,52 @@
  * in either FDI or DP modes only, as HDMI connections will work with both
  * of those
  */
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
-				      bool supports_hdmi)
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 iboost_bit = 0;
 	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
 	    size;
-	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	int hdmi_level;
+	enum port port;
 	const struct ddi_buf_trans *ddi_translations_fdi;
 	const struct ddi_buf_trans *ddi_translations_dp;
 	const struct ddi_buf_trans *ddi_translations_edp;
 	const struct ddi_buf_trans *ddi_translations_hdmi;
 	const struct ddi_buf_trans *ddi_translations;
 
-	if (IS_BROXTON(dev)) {
-		if (!supports_hdmi)
+	port = intel_ddi_get_encoder_port(encoder);
+	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+	if (IS_BROXTON(dev_priv)) {
+		if (encoder->type != INTEL_OUTPUT_HDMI)
 			return;
 
 		/* Vswing programming for HDMI */
-		bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+		bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
 					INTEL_OUTPUT_HDMI);
 		return;
-	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	}
+
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		ddi_translations_fdi = NULL;
 		ddi_translations_dp =
-				skl_get_buf_trans_dp(dev, &n_dp_entries);
+				skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
 		ddi_translations_edp =
-				skl_get_buf_trans_edp(dev, &n_edp_entries);
+				skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
 		ddi_translations_hdmi =
-				skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+				skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
 		hdmi_default_entry = 8;
 		/* If we're boosting the current, set bit 31 of trans1 */
 		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
 		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
 			iboost_bit = 1<<31;
-	} else if (IS_BROADWELL(dev)) {
+
+		if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+			    port != PORT_A && port != PORT_E &&
+			    n_edp_entries > 9))
+			n_edp_entries = 9;
+	} else if (IS_BROADWELL(dev_priv)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
 		ddi_translations_edp = bdw_ddi_translations_edp;
@@ -470,7 +449,7 @@
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 		hdmi_default_entry = 7;
-	} else if (IS_HASWELL(dev)) {
+	} else if (IS_HASWELL(dev_priv)) {
 		ddi_translations_fdi = hsw_ddi_translations_fdi;
 		ddi_translations_dp = hsw_ddi_translations_dp;
 		ddi_translations_edp = hsw_ddi_translations_dp;
@@ -490,30 +469,18 @@
 		hdmi_default_entry = 7;
 	}
 
-	switch (port) {
-	case PORT_A:
+	switch (encoder->type) {
+	case INTEL_OUTPUT_EDP:
 		ddi_translations = ddi_translations_edp;
 		size = n_edp_entries;
 		break;
-	case PORT_B:
-	case PORT_C:
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_HDMI:
 		ddi_translations = ddi_translations_dp;
 		size = n_dp_entries;
 		break;
-	case PORT_D:
-		if (intel_dp_is_edp(dev, PORT_D)) {
-			ddi_translations = ddi_translations_edp;
-			size = n_edp_entries;
-		} else {
-			ddi_translations = ddi_translations_dp;
-			size = n_dp_entries;
-		}
-		break;
-	case PORT_E:
-		if (ddi_translations_fdi)
-			ddi_translations = ddi_translations_fdi;
-		else
-			ddi_translations = ddi_translations_dp;
+	case INTEL_OUTPUT_ANALOG:
+		ddi_translations = ddi_translations_fdi;
 		size = n_dp_entries;
 		break;
 	default:
@@ -527,7 +494,7 @@
 			   ddi_translations[i].trans2);
 	}
 
-	if (!supports_hdmi)
+	if (encoder->type != INTEL_OUTPUT_HDMI)
 		return;
 
 	/* Choose a good default if VBT is badly populated */
@@ -542,37 +509,6 @@
 		   ddi_translations_hdmi[hdmi_level].trans2);
 }
 
-/* Program DDI buffers translations for DP. By default, program ports A-D in DP
- * mode and port E for FDI.
- */
-void intel_prepare_ddi(struct drm_device *dev)
-{
-	struct intel_encoder *intel_encoder;
-	bool visited[I915_MAX_PORTS] = { 0, };
-
-	if (!HAS_DDI(dev))
-		return;
-
-	for_each_intel_encoder(dev, intel_encoder) {
-		struct intel_digital_port *intel_dig_port;
-		enum port port;
-		bool supports_hdmi;
-
-		if (intel_encoder->type == INTEL_OUTPUT_DSI)
-			continue;
-
-		ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
-		if (visited[port])
-			continue;
-
-		supports_hdmi = intel_dig_port &&
-				intel_dig_port_supports_hdmi(intel_dig_port);
-
-		intel_prepare_ddi_buffers(dev, port, supports_hdmi);
-		visited[port] = true;
-	}
-}
-
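
Note: intel_prepare_ddi(), which walked every port at init time, is removed above; each encoder now programs its own buffer translations on enable, and the table is picked by encoder type instead of port number. A standalone sketch of that type-based selection, with stand-in output types and table names:

    /*
     * Standalone sketch of the type-based table selection that replaces
     * the old port-number switch; output types and table names are
     * stand-ins for the driver's.
     */
    #include <stdio.h>

    enum output_type { OUT_EDP, OUT_DP, OUT_HDMI, OUT_ANALOG };

    static const char *pick_table(enum output_type type)
    {
    	switch (type) {
    	case OUT_EDP:
    		return "edp";
    	case OUT_DP:
    	case OUT_HDMI:	/* HDMI links come up with the DP values */
    		return "dp";
    	case OUT_ANALOG:
    		return "fdi";	/* CRT over FDI */
    	default:
    		return NULL;
    	}
    }

    int main(void)
    {
    	printf("%s\n", pick_table(OUT_HDMI));
    	return 0;
    }
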
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 				    enum port port)
 {
@@ -601,8 +537,14 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	u32 temp, i, rx_ctl_val;
 
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
+		intel_prepare_ddi_buffer(encoder);
+	}
+
 	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
 	 * mode set "sequence for CRT port" document:
 	 * - TP1 to TP2 time with the default value
@@ -1604,8 +1546,10 @@
 		}
 
 		cfgcr1 = cfgcr2 = 0;
-	} else /* eDP */
+	} else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
 		return true;
+	} else
+		return false;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
@@ -2109,10 +2053,9 @@
 			   TRANS_CLK_SEL_DISABLED);
 }
 
-static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
-			       enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+			       u32 level, enum port port, int type)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct ddi_buf_trans *ddi_translations;
 	uint8_t iboost;
 	uint8_t dp_iboost, hdmi_iboost;
@@ -2127,21 +2070,26 @@
 		if (dp_iboost) {
 			iboost = dp_iboost;
 		} else {
-			ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+			ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
 			iboost = ddi_translations[level].i_boost;
 		}
 	} else if (type == INTEL_OUTPUT_EDP) {
 		if (dp_iboost) {
 			iboost = dp_iboost;
 		} else {
-			ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+			ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
+
+			if (WARN_ON(port != PORT_A &&
+				    port != PORT_E && n_entries > 9))
+				n_entries = 9;
+
 			iboost = ddi_translations[level].i_boost;
 		}
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		if (hdmi_iboost) {
 			iboost = hdmi_iboost;
 		} else {
-			ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+			ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
 			iboost = ddi_translations[level].i_boost;
 		}
 	} else {
@@ -2166,10 +2114,9 @@
 	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
 }
 
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-				    enum port port, int type)
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
+				    u32 level, enum port port, int type)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct bxt_ddi_buf_trans *ddi_translations;
 	u32 n_entries, i;
 	uint32_t val;
@@ -2284,7 +2231,7 @@
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dport->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
 	struct intel_encoder *encoder = &dport->base;
 	uint8_t train_set = intel_dp->train_set[0];
 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
@@ -2294,10 +2241,10 @@
 
 	level = translate_signal_level(signal_levels);
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-		skl_ddi_set_iboost(dev, level, port, encoder->type);
-	else if (IS_BROXTON(dev))
-		bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+	else if (IS_BROXTON(dev_priv))
+		bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
 
 	return DDI_BUF_TRANS_SELECT(level);
 }
@@ -2349,12 +2296,12 @@
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
-	int hdmi_level;
+
+	intel_prepare_ddi_buffer(intel_encoder);
 
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2372,17 +2319,11 @@
 
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 		intel_dp_start_link_train(intel_dp);
-		if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
+		if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
 			intel_dp_stop_link_train(intel_dp);
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
-		if (IS_BROXTON(dev)) {
-			hdmi_level = dev_priv->vbt.
-				ddi_port_info[port].hdmi_level_shift;
-			bxt_ddi_vswing_sequence(dev, hdmi_level, port,
-					INTEL_OUTPUT_HDMI);
-		}
 		intel_hdmi->set_infoframes(encoder,
 					   crtc->config->has_hdmi_sink,
 					   &crtc->config->base.adjusted_mode);
@@ -3329,6 +3270,33 @@
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
 	bool init_hdmi, init_dp;
+	int max_lanes;
+
+	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
+		switch (port) {
+		case PORT_A:
+			max_lanes = 4;
+			break;
+		case PORT_E:
+			max_lanes = 0;
+			break;
+		default:
+			max_lanes = 4;
+			break;
+		}
+	} else {
+		switch (port) {
+		case PORT_A:
+			max_lanes = 2;
+			break;
+		case PORT_E:
+			max_lanes = 2;
+			break;
+		default:
+			max_lanes = 4;
+			break;
+		}
+	}
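
Note: the two switches above encode that ports A and E share a single pool of four lanes: with DDI_A_4_LANES set, port A takes all four and port E gets none; otherwise each gets two, while ports B/C/D always have four of their own. A condensed, runnable restatement (the port flags are illustrative):

    #include <stdio.h>

    /*
     * Condensed restatement of the switches above: ports A and E share
     * one pool of four lanes; DDI_A_4_LANES hands all of them to port A.
     */
    static int max_lanes(int is_port_a, int is_port_e, int a_has_4_lanes)
    {
    	if (!is_port_a && !is_port_e)
    		return 4;		/* B/C/D have four dedicated lanes */
    	if (a_has_4_lanes)
    		return is_port_a ? 4 : 0;
    	return 2;			/* pool split evenly between A and E */
    }

    int main(void)
    {
    	printf("A=%d E=%d\n", max_lanes(1, 0, 1), max_lanes(0, 1, 1));
    	return 0;
    }
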
 
 	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
 		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
@@ -3373,9 +3341,12 @@
 		if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
 			DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
 			intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
+			max_lanes = 4;
 		}
 	}
 
+	intel_dig_port->max_lanes = max_lanes;
+
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 46947ff..6e0d828 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -85,8 +85,6 @@
 	DRM_FORMAT_ARGB8888,
 };
 
-static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
-
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				struct intel_crtc_state *pipe_config);
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -1152,11 +1150,6 @@
 	}
 }
 
-static const char *state_string(bool enabled)
-{
-	return enabled ? "on" : "off";
-}
-
 /* Only for pre-ILK configs */
 void assert_pll(struct drm_i915_private *dev_priv,
 		enum pipe pipe, bool state)
@@ -1168,7 +1161,7 @@
 	cur_state = !!(val & DPLL_VCO_ENABLE);
 	I915_STATE_WARN(cur_state != state,
 	     "PLL state assertion failure (expected %s, current %s)\n",
-	     state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 
 /* XXX: the dsi pll is shared between MIPI DSI ports */
@@ -1184,7 +1177,7 @@
 	cur_state = val & DSI_PLL_VCO_EN;
 	I915_STATE_WARN(cur_state != state,
 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
-	     state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
@@ -1208,14 +1201,13 @@
 	bool cur_state;
 	struct intel_dpll_hw_state hw_state;
 
-	if (WARN (!pll,
-		  "asserting DPLL %s with no DPLL\n", state_string(state)))
+	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
 		return;
 
 	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
 	I915_STATE_WARN(cur_state != state,
 	     "%s assertion failure (expected %s, current %s)\n",
-	     pll->name, state_string(state), state_string(cur_state));
+			pll->name, onoff(state), onoff(cur_state));
 }
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
@@ -1235,7 +1227,7 @@
 	}
 	I915_STATE_WARN(cur_state != state,
 	     "FDI TX state assertion failure (expected %s, current %s)\n",
-	     state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
@@ -1250,7 +1242,7 @@
 	cur_state = !!(val & FDI_RX_ENABLE);
 	I915_STATE_WARN(cur_state != state,
 	     "FDI RX state assertion failure (expected %s, current %s)\n",
-	     state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
@@ -1282,7 +1274,7 @@
 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
 	I915_STATE_WARN(cur_state != state,
 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
-	     state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 
 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1340,7 +1332,7 @@
 
 	I915_STATE_WARN(cur_state != state,
 	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
-	     pipe_name(pipe), state_string(state), state_string(cur_state));
+			pipe_name(pipe), onoff(state), onoff(cur_state));
 }
 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
@@ -1370,7 +1362,7 @@
 
 	I915_STATE_WARN(cur_state != state,
 	     "pipe %c assertion failure (expected %s, current %s)\n",
-	     pipe_name(pipe), state_string(state), state_string(cur_state));
+			pipe_name(pipe), onoff(state), onoff(cur_state));
 }
 
 static void assert_plane(struct drm_i915_private *dev_priv,
@@ -1383,7 +1375,7 @@
 	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
 	I915_STATE_WARN(cur_state != state,
 	     "plane %c assertion failure (expected %s, current %s)\n",
-	     plane_name(plane), state_string(state), state_string(cur_state));
+			plane_name(plane), onoff(state), onoff(cur_state));
 }
 
 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
@@ -2156,6 +2148,17 @@
 
 	I915_WRITE(reg, val | PIPECONF_ENABLE);
 	POSTING_READ(reg);
+
+	/*
+	 * Until the pipe starts DSL will read as 0, which would cause
+	 * an apparent vblank timestamp jump, which also messes up the
+	 * frame count when it's derived from the timestamps. So let's
+	 * wait for the pipe to start properly before we call
+	 * drm_crtc_vblank_on()
+	 */
+	if (dev->max_vblank_count == 0 &&
+	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
+		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
 }
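
Note: the wait added above matters only when max_vblank_count == 0, i.e. when frame counts are derived from vblank timestamps; polling until the scanline leaves scanline_offset avoids the bogus timestamp jump. A userspace sketch of the poll-with-timeout pattern in the spirit of the driver's wait_for() macro (the 1 ms step and stub condition are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static int calls;
    static bool pipe_running(void) { return ++calls >= 3; }	/* stub condition */

    /* Poll-with-timeout in the spirit of i915's wait_for() macro. */
    static bool poll_until(bool (*cond)(void), int timeout_ms)
    {
    	struct timespec step = { 0, 1000 * 1000 };	/* 1 ms per iteration */

    	while (timeout_ms-- > 0) {
    		if (cond())
    			return true;
    		nanosleep(&step, NULL);
    	}
    	return cond();	/* final check after the deadline, like wait_for() */
    }

    int main(void)
    {
    	printf("started: %d\n", poll_until(pipe_running, 50));
    	return 0;
    }
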
 
 /**
@@ -2217,67 +2220,75 @@
 	return false;
 }
 
-unsigned int
-intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
-		  uint64_t fb_format_modifier, unsigned int plane)
+static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 {
-	unsigned int tile_height;
-	uint32_t pixel_bytes;
+	return IS_GEN2(dev_priv) ? 2048 : 4096;
+}
 
-	switch (fb_format_modifier) {
+static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
+				     uint64_t fb_modifier, unsigned int cpp)
+{
+	switch (fb_modifier) {
 	case DRM_FORMAT_MOD_NONE:
-		tile_height = 1;
-		break;
+		return cpp;
 	case I915_FORMAT_MOD_X_TILED:
-		tile_height = IS_GEN2(dev) ? 16 : 8;
-		break;
+		if (IS_GEN2(dev_priv))
+			return 128;
+		else
+			return 512;
 	case I915_FORMAT_MOD_Y_TILED:
-		tile_height = 32;
-		break;
+		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
+			return 128;
+		else
+			return 512;
 	case I915_FORMAT_MOD_Yf_TILED:
-		pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
-		switch (pixel_bytes) {
-		default:
+		switch (cpp) {
 		case 1:
-			tile_height = 64;
-			break;
+			return 64;
 		case 2:
 		case 4:
-			tile_height = 32;
-			break;
+			return 128;
 		case 8:
-			tile_height = 16;
-			break;
 		case 16:
-			WARN_ONCE(1,
-				  "128-bit pixels are not supported for display!");
-			tile_height = 16;
-			break;
+			return 256;
+		default:
+			MISSING_CASE(cpp);
+			return cpp;
 		}
 		break;
 	default:
-		MISSING_CASE(fb_format_modifier);
-		tile_height = 1;
-		break;
+		MISSING_CASE(fb_modifier);
+		return cpp;
 	}
+}
 
-	return tile_height;
+unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
+			       uint64_t fb_modifier, unsigned int cpp)
+{
+	if (fb_modifier == DRM_FORMAT_MOD_NONE)
+		return 1;
+	else
+		return intel_tile_size(dev_priv) /
+			intel_tile_width(dev_priv, fb_modifier, cpp);
 }
 
 unsigned int
 intel_fb_align_height(struct drm_device *dev, unsigned int height,
-		      uint32_t pixel_format, uint64_t fb_format_modifier)
+		      uint32_t pixel_format, uint64_t fb_modifier)
 {
-	return ALIGN(height, intel_tile_height(dev, pixel_format,
-					       fb_format_modifier, 0));
+	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
+	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
+
+	return ALIGN(height, tile_height);
 }
 
 static void
 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
 			const struct drm_plane_state *plane_state)
 {
-	struct intel_rotation_info *info = &view->params.rotation_info;
-	unsigned int tile_height, tile_pitch;
+	struct drm_i915_private *dev_priv = to_i915(fb->dev);
+	struct intel_rotation_info *info = &view->params.rotated;
+	unsigned int tile_size, tile_width, tile_height, cpp;
 
 	*view = i915_ggtt_view_normal;
 
@@ -2295,26 +2306,28 @@
 	info->uv_offset = fb->offsets[1];
 	info->fb_modifier = fb->modifier[0];
 
-	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
-					fb->modifier[0], 0);
-	tile_pitch = PAGE_SIZE / tile_height;
-	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+	tile_size = intel_tile_size(dev_priv);
+
+	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
+	tile_height = tile_size / tile_width;
+
+	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
 	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
-	info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+	info->size = info->width_pages * info->height_pages * tile_size;
 
 	if (info->pixel_format == DRM_FORMAT_NV12) {
-		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
-						fb->modifier[0], 1);
-		tile_pitch = PAGE_SIZE / tile_height;
-		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
-		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
-						     tile_height);
-		info->size_uv = info->width_pages_uv * info->height_pages_uv *
-				PAGE_SIZE;
+		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
+		tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
+		tile_height = tile_size / tile_width;
+
+		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
+		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
+		info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
 	}
 }
 
-static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
 {
 	if (INTEL_INFO(dev_priv)->gen >= 9)
 		return 256 * 1024;
@@ -2327,6 +2340,25 @@
 		return 0;
 }
 
+static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
+					 uint64_t fb_modifier)
+{
+	switch (fb_modifier) {
+	case DRM_FORMAT_MOD_NONE:
+		return intel_linear_alignment(dev_priv);
+	case I915_FORMAT_MOD_X_TILED:
+		if (INTEL_INFO(dev_priv)->gen >= 9)
+			return 256 * 1024;
+		return 0;
+	case I915_FORMAT_MOD_Y_TILED:
+	case I915_FORMAT_MOD_Yf_TILED:
+		return 1 * 1024 * 1024;
+	default:
+		MISSING_CASE(fb_modifier);
+		return 0;
+	}
+}
+
 int
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			   struct drm_framebuffer *fb,
@@ -2341,29 +2373,7 @@
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	switch (fb->modifier[0]) {
-	case DRM_FORMAT_MOD_NONE:
-		alignment = intel_linear_alignment(dev_priv);
-		break;
-	case I915_FORMAT_MOD_X_TILED:
-		if (INTEL_INFO(dev)->gen >= 9)
-			alignment = 256 * 1024;
-		else {
-			/* pin() will align the object as required by fence */
-			alignment = 0;
-		}
-		break;
-	case I915_FORMAT_MOD_Y_TILED:
-	case I915_FORMAT_MOD_Yf_TILED:
-		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
-			  "Y tiling bo slipped through, driver bug!\n"))
-			return -EINVAL;
-		alignment = 1 * 1024 * 1024;
-		break;
-	default:
-		MISSING_CASE(fb->modifier[0]);
-		return -EINVAL;
-	}
+	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
 
 	intel_fill_fb_ggtt_view(&view, fb, plane_state);
 
@@ -2441,22 +2451,27 @@
 
 /* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
  * is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
-					     int *x, int *y,
-					     unsigned int tiling_mode,
-					     unsigned int cpp,
-					     unsigned int pitch)
+u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
+			      int *x, int *y,
+			      uint64_t fb_modifier,
+			      unsigned int cpp,
+			      unsigned int pitch)
 {
-	if (tiling_mode != I915_TILING_NONE) {
+	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
+		unsigned int tile_size, tile_width, tile_height;
 		unsigned int tile_rows, tiles;
 
-		tile_rows = *y / 8;
-		*y %= 8;
+		tile_size = intel_tile_size(dev_priv);
+		tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
+		tile_height = tile_size / tile_width;
 
-		tiles = *x / (512/cpp);
-		*x %= 512/cpp;
+		tile_rows = *y / tile_height;
+		*y %= tile_height;
 
-		return tile_rows * pitch * 8 + tiles * 4096;
+		tiles = *x / (tile_width/cpp);
+		*x %= tile_width/cpp;
+
+		return tile_rows * pitch * tile_height + tiles * tile_size;
 	} else {
 		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
 		unsigned int offset;
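
Note: the rewritten offset math above generalizes the old hardcoded X-tile geometry (8-row, 4096-byte tiles that are 512 bytes, i.e. 512/cpp pixels, wide) to any modifier. A standalone worked example for the X-tiled case with assumed values (cpp 4, pitch 8192 bytes, x 100, y 50):

    /*
     * Standalone rework of the tile-offset math above for the X-tiled
     * case (tile_size 4096, tile_width 512); the input values are an
     * arbitrary example.
     */
    #include <stdio.h>

    int main(void)
    {
    	unsigned tile_size = 4096, tile_width = 512, cpp = 4;
    	unsigned tile_height = tile_size / tile_width;	/* 8 rows */
    	unsigned pitch = 8192;				/* bytes per scanline */
    	unsigned x = 100, y = 50;

    	unsigned tile_rows = y / tile_height;	/* 50 / 8 = 6 */
    	y %= tile_height;			/* y becomes 2 */
    	unsigned tiles = x / (tile_width / cpp);/* 100 / 128 = 0 */
    	x %= tile_width / cpp;			/* x stays 100 */

    	/* prints offset=393216 x=100 y=2 */
    	printf("offset=%u x=%u y=%u\n",
    	       tile_rows * pitch * tile_height + tiles * tile_size, x, y);
    	return 0;
    }
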
@@ -2539,12 +2554,16 @@
 	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
 		return false;
 
+	mutex_lock(&dev->struct_mutex);
+
 	obj = i915_gem_object_create_stolen_for_preallocated(dev,
 							     base_aligned,
 							     base_aligned,
 							     size_aligned);
-	if (!obj)
+	if (!obj) {
+		mutex_unlock(&dev->struct_mutex);
 		return false;
+	}
 
 	obj->tiling_mode = plane_config->tiling;
 	if (obj->tiling_mode == I915_TILING_X)
@@ -2557,12 +2576,12 @@
 	mode_cmd.modifier[0] = fb->modifier[0];
 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
 
-	mutex_lock(&dev->struct_mutex);
 	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
 				   &mode_cmd, obj)) {
 		DRM_DEBUG_KMS("intel fb init failed\n");
 		goto out_unref_obj;
 	}
+
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
@@ -2601,6 +2620,8 @@
 	struct drm_plane_state *plane_state = primary->state;
 	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
 	struct intel_plane *intel_plane = to_intel_plane(primary);
+	struct intel_plane_state *intel_state =
+		to_intel_plane_state(plane_state);
 	struct drm_framebuffer *fb;
 
 	if (!plane_config->fb)
@@ -2662,6 +2683,15 @@
 	plane_state->crtc_w = fb->width;
 	plane_state->crtc_h = fb->height;
 
+	intel_state->src.x1 = plane_state->src_x;
+	intel_state->src.y1 = plane_state->src_y;
+	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
+	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
+	intel_state->dst.x1 = plane_state->crtc_x;
+	intel_state->dst.y1 = plane_state->crtc_y;
+	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
+	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+
 	obj = intel_fb_obj(fb);
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dev_priv->preserve_bios_swizzle = true;
@@ -2673,37 +2703,22 @@
 	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
 }
 
-static void i9xx_update_primary_plane(struct drm_crtc *crtc,
-				      struct drm_framebuffer *fb,
-				      int x, int y)
+static void i9xx_update_primary_plane(struct drm_plane *primary,
+				      const struct intel_crtc_state *crtc_state,
+				      const struct intel_plane_state *plane_state)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = primary->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_plane *primary = crtc->primary;
-	bool visible = to_intel_plane_state(primary->state)->visible;
-	struct drm_i915_gem_object *obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_framebuffer *fb = plane_state->base.fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int plane = intel_crtc->plane;
-	unsigned long linear_offset;
+	u32 linear_offset;
 	u32 dspcntr;
 	i915_reg_t reg = DSPCNTR(plane);
-	int pixel_size;
-
-	if (!visible || !fb) {
-		I915_WRITE(reg, 0);
-		if (INTEL_INFO(dev)->gen >= 4)
-			I915_WRITE(DSPSURF(plane), 0);
-		else
-			I915_WRITE(DSPADDR(plane), 0);
-		POSTING_READ(reg);
-		return;
-	}
-
-	obj = intel_fb_obj(fb);
-	if (WARN_ON(obj == NULL))
-		return;
-
-	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	int x = plane_state->src.x1 >> 16;
+	int y = plane_state->src.y1 >> 16;
 
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -2717,13 +2732,13 @@
 		 * which should always be the user's requested size.
 		 */
 		I915_WRITE(DSPSIZE(plane),
-			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
-			   (intel_crtc->config->pipe_src_w - 1));
+			   ((crtc_state->pipe_src_h - 1) << 16) |
+			   (crtc_state->pipe_src_w - 1));
 		I915_WRITE(DSPPOS(plane), 0);
 	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
 		I915_WRITE(PRIMSIZE(plane),
-			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
-			   (intel_crtc->config->pipe_src_w - 1));
+			   ((crtc_state->pipe_src_h - 1) << 16) |
+			   (crtc_state->pipe_src_w - 1));
 		I915_WRITE(PRIMPOS(plane), 0);
 		I915_WRITE(PRIMCNSTALPHA(plane), 0);
 	}
@@ -2761,30 +2776,29 @@
 	if (IS_G4X(dev))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	linear_offset = y * fb->pitches[0] + x * cpp;
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		intel_crtc->dspaddr_offset =
-			intel_gen4_compute_page_offset(dev_priv,
-						       &x, &y, obj->tiling_mode,
-						       pixel_size,
-						       fb->pitches[0]);
+			intel_compute_tile_offset(dev_priv, &x, &y,
+						  fb->modifier[0], cpp,
+						  fb->pitches[0]);
 		linear_offset -= intel_crtc->dspaddr_offset;
 	} else {
 		intel_crtc->dspaddr_offset = linear_offset;
 	}
 
-	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
 		dspcntr |= DISPPLANE_ROTATE_180;
 
-		x += (intel_crtc->config->pipe_src_w - 1);
-		y += (intel_crtc->config->pipe_src_h - 1);
+		x += (crtc_state->pipe_src_w - 1);
+		y += (crtc_state->pipe_src_h - 1);
 
 		/* Find the last pixel of the last line of the display
 		 * data and add it to linear_offset. */
 		linear_offset +=
-			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
-			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
+			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
+			(crtc_state->pipe_src_w - 1) * cpp;
 	}
 
 	intel_crtc->adjusted_x = x;
@@ -2803,37 +2817,40 @@
 	POSTING_READ(reg);
 }
 
-static void ironlake_update_primary_plane(struct drm_crtc *crtc,
-					  struct drm_framebuffer *fb,
-					  int x, int y)
+static void i9xx_disable_primary_plane(struct drm_plane *primary,
+				       struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_plane *primary = crtc->primary;
-	bool visible = to_intel_plane_state(primary->state)->visible;
-	struct drm_i915_gem_object *obj;
 	int plane = intel_crtc->plane;
-	unsigned long linear_offset;
+
+	I915_WRITE(DSPCNTR(plane), 0);
+	if (INTEL_INFO(dev_priv)->gen >= 4)
+		I915_WRITE(DSPSURF(plane), 0);
+	else
+		I915_WRITE(DSPADDR(plane), 0);
+	POSTING_READ(DSPCNTR(plane));
+}
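
Note: the visibility check that used to live inside every update path is hoisted into the caller; planes now get separate update and disable hooks, so disable (added above) only has to zero the control registers. A minimal sketch of that dispatch with stand-in types:

    /*
     * Minimal sketch of the split dispatch; the struct is a stand-in for
     * the driver's plane vfuncs, not its actual layout.
     */
    #include <stdio.h>

    struct plane_funcs {
    	void (*update)(const char *plane);
    	void (*disable)(const char *plane);
    };

    static void demo_update(const char *p)  { printf("%s: update\n", p); }
    static void demo_disable(const char *p) { printf("%s: disable\n", p); }

    /* Visibility picks the vfunc; the vfunc never re-checks it. */
    static void commit_plane(const struct plane_funcs *f, const char *plane,
    			 int visible)
    {
    	if (visible)
    		f->update(plane);
    	else
    		f->disable(plane);
    }

    int main(void)
    {
    	struct plane_funcs f = { demo_update, demo_disable };

    	commit_plane(&f, "primary", 1);
    	commit_plane(&f, "primary", 0);
    	return 0;
    }
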
+
+static void ironlake_update_primary_plane(struct drm_plane *primary,
+					  const struct intel_crtc_state *crtc_state,
+					  const struct intel_plane_state *plane_state)
+{
+	struct drm_device *dev = primary->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_framebuffer *fb = plane_state->base.fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	int plane = intel_crtc->plane;
+	u32 linear_offset;
 	u32 dspcntr;
 	i915_reg_t reg = DSPCNTR(plane);
-	int pixel_size;
-
-	if (!visible || !fb) {
-		I915_WRITE(reg, 0);
-		I915_WRITE(DSPSURF(plane), 0);
-		POSTING_READ(reg);
-		return;
-	}
-
-	obj = intel_fb_obj(fb);
-	if (WARN_ON(obj == NULL))
-		return;
-
-	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	int x = plane_state->src.x1 >> 16;
+	int y = plane_state->src.y1 >> 16;
 
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
 	dspcntr |= DISPLAY_PLANE_ENABLE;
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -2868,25 +2885,24 @@
 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	linear_offset = y * fb->pitches[0] + x * cpp;
 	intel_crtc->dspaddr_offset =
-		intel_gen4_compute_page_offset(dev_priv,
-					       &x, &y, obj->tiling_mode,
-					       pixel_size,
-					       fb->pitches[0]);
+		intel_compute_tile_offset(dev_priv, &x, &y,
+					  fb->modifier[0], cpp,
+					  fb->pitches[0]);
 	linear_offset -= intel_crtc->dspaddr_offset;
-	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
 		dspcntr |= DISPPLANE_ROTATE_180;
 
 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
-			x += (intel_crtc->config->pipe_src_w - 1);
-			y += (intel_crtc->config->pipe_src_h - 1);
+			x += (crtc_state->pipe_src_w - 1);
+			y += (crtc_state->pipe_src_h - 1);
 
 			/* Find the last pixel of the last line of the display
 			 * data and add it to linear_offset. */
 			linear_offset +=
-				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
-				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
+				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
+				(crtc_state->pipe_src_w - 1) * cpp;
 		}
 	}
 
@@ -2907,37 +2923,15 @@
 	POSTING_READ(reg);
 }
 
-u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
-			      uint32_t pixel_format)
+u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
+			      uint64_t fb_modifier, uint32_t pixel_format)
 {
-	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
+	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
+		return 64;
+	} else {
+		int cpp = drm_format_plane_cpp(pixel_format, 0);
 
-	/*
-	 * The stride is either expressed as a multiple of 64 bytes
-	 * chunks for linear buffers or in number of tiles for tiled
-	 * buffers.
-	 */
-	switch (fb_modifier) {
-	case DRM_FORMAT_MOD_NONE:
-		return 64;
-	case I915_FORMAT_MOD_X_TILED:
-		if (INTEL_INFO(dev)->gen == 2)
-			return 128;
-		return 512;
-	case I915_FORMAT_MOD_Y_TILED:
-		/* No need to check for old gens and Y tiling since this is
-		 * about the display engine and those will be blocked before
-		 * we get here.
-		 */
-		return 128;
-	case I915_FORMAT_MOD_Yf_TILED:
-		if (bits_per_pixel == 8)
-			return 64;
-		else
-			return 128;
-	default:
-		MISSING_CASE(fb_modifier);
-		return 64;
+		return intel_tile_width(dev_priv, fb_modifier, cpp);
 	}
 }
 
@@ -2960,7 +2954,7 @@
 	offset = vma->node.start;
 
 	if (plane == 1) {
-		offset += vma->ggtt_view.params.rotation_info.uv_start_page *
+		offset += vma->ggtt_view.params.rotated.uv_start_page *
 			  PAGE_SIZE;
 	}
 
@@ -3077,36 +3071,30 @@
 	return 0;
 }
 
-static void skylake_update_primary_plane(struct drm_crtc *crtc,
-					 struct drm_framebuffer *fb,
-					 int x, int y)
+static void skylake_update_primary_plane(struct drm_plane *plane,
+					 const struct intel_crtc_state *crtc_state,
+					 const struct intel_plane_state *plane_state)
 {
-	struct drm_device *dev = crtc->dev;
+	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_plane *plane = crtc->primary;
-	bool visible = to_intel_plane_state(plane->state)->visible;
-	struct drm_i915_gem_object *obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_framebuffer *fb = plane_state->base.fb;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int pipe = intel_crtc->pipe;
 	u32 plane_ctl, stride_div, stride;
 	u32 tile_height, plane_offset, plane_size;
-	unsigned int rotation;
+	unsigned int rotation = plane_state->base.rotation;
 	int x_offset, y_offset;
 	u32 surf_addr;
-	struct intel_crtc_state *crtc_state = intel_crtc->config;
-	struct intel_plane_state *plane_state;
-	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
-	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
-	int scaler_id = -1;
-
-	plane_state = to_intel_plane_state(plane->state);
-
-	if (!visible || !fb) {
-		I915_WRITE(PLANE_CTL(pipe, 0), 0);
-		I915_WRITE(PLANE_SURF(pipe, 0), 0);
-		POSTING_READ(PLANE_CTL(pipe, 0));
-		return;
-	}
+	int scaler_id = plane_state->scaler_id;
+	int src_x = plane_state->src.x1 >> 16;
+	int src_y = plane_state->src.y1 >> 16;
+	int src_w = drm_rect_width(&plane_state->src) >> 16;
+	int src_h = drm_rect_height(&plane_state->src) >> 16;
+	int dst_x = plane_state->dst.x1;
+	int dst_y = plane_state->dst.y1;
+	int dst_w = drm_rect_width(&plane_state->dst);
+	int dst_h = drm_rect_height(&plane_state->dst);
 
 	plane_ctl = PLANE_CTL_ENABLE |
 		    PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -3115,41 +3103,27 @@
 	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
-	rotation = plane->state->rotation;
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-	obj = intel_fb_obj(fb);
-	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
 					       fb->pixel_format);
 	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
 
 	WARN_ON(drm_rect_width(&plane_state->src) == 0);
 
-	scaler_id = plane_state->scaler_id;
-	src_x = plane_state->src.x1 >> 16;
-	src_y = plane_state->src.y1 >> 16;
-	src_w = drm_rect_width(&plane_state->src) >> 16;
-	src_h = drm_rect_height(&plane_state->src) >> 16;
-	dst_x = plane_state->dst.x1;
-	dst_y = plane_state->dst.y1;
-	dst_w = drm_rect_width(&plane_state->dst);
-	dst_h = drm_rect_height(&plane_state->dst);
-
-	WARN_ON(x != src_x || y != src_y);
-
 	if (intel_rotation_90_or_270(rotation)) {
+		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
 		/* stride = Surface height in tiles */
-		tile_height = intel_tile_height(dev, fb->pixel_format,
-						fb->modifier[0], 0);
+		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
 		stride = DIV_ROUND_UP(fb->height, tile_height);
-		x_offset = stride * tile_height - y - src_h;
-		y_offset = x;
+		x_offset = stride * tile_height - src_y - src_h;
+		y_offset = src_x;
 		plane_size = (src_w - 1) << 16 | (src_h - 1);
 	} else {
 		stride = fb->pitches[0] / stride_div;
-		x_offset = x;
-		y_offset = y;
+		x_offset = src_x;
+		y_offset = src_y;
 		plane_size = (src_h - 1) << 16 | (src_w - 1);
 	}
 	plane_offset = y_offset << 16 | x_offset;
@@ -3182,20 +3156,27 @@
 	POSTING_READ(PLANE_SURF(pipe, 0));
 }
 
+static void skylake_disable_primary_plane(struct drm_plane *primary,
+					  struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = to_intel_crtc(crtc)->pipe;
+
+	I915_WRITE(PLANE_CTL(pipe, 0), 0);
+	I915_WRITE(PLANE_SURF(pipe, 0), 0);
+	POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
 /* Assume fb object is pinned & idle & fenced and just update base pointers */
 static int
 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			   int x, int y, enum mode_set_atomic state)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	/* Support for kgdboc is disabled; this needs a major rework. */
+	DRM_ERROR("legacy panic handler not supported any more.\n");
 
-	if (dev_priv->fbc.deactivate)
-		dev_priv->fbc.deactivate(dev_priv);
-
-	dev_priv->display.update_primary_plane(crtc, fb, x, y);
-
-	return 0;
+	return -ENODEV;
 }
 
 static void intel_complete_page_flips(struct drm_device *dev)
@@ -3222,8 +3203,10 @@
 		drm_modeset_lock_crtc(crtc, &plane->base);
 		plane_state = to_intel_plane_state(plane->base.state);
 
-		if (crtc->state->active && plane_state->base.fb)
-			plane->commit_plane(&plane->base, plane_state);
+		if (plane_state->visible)
+			plane->update_plane(&plane->base,
+					    to_intel_crtc_state(crtc->state),
+					    plane_state);
 
 		drm_modeset_unlock_crtc(crtc);
 	}
@@ -4455,7 +4438,7 @@
 		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
 
 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-		&state->scaler_state.scaler_id, DRM_ROTATE_0,
+		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
 		state->pipe_src_w, state->pipe_src_h,
 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }
@@ -4809,9 +4792,6 @@
 		to_intel_crtc_state(crtc->base.state);
 	struct drm_device *dev = crtc->base.dev;
 
-	if (atomic->wait_vblank)
-		intel_wait_for_vblank(dev, crtc->pipe);
-
 	intel_frontbuffer_flip(dev, atomic->fb_bits);
 
 	crtc->wm.cxsr_allowed = true;
@@ -4820,7 +4800,7 @@
 		intel_update_watermarks(&crtc->base);
 
 	if (atomic->update_fbc)
-		intel_fbc_update(crtc);
+		intel_fbc_post_update(crtc);
 
 	if (atomic->post_enable_primary)
 		intel_post_enable_primary(&crtc->base);
@@ -4828,26 +4808,39 @@
 	memset(atomic, 0, sizeof(*atomic));
 }
 
-static void intel_pre_plane_update(struct intel_crtc *crtc)
+static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->base.state);
+	struct drm_atomic_state *old_state = old_crtc_state->base.state;
+	struct drm_plane *primary = crtc->base.primary;
+	struct drm_plane_state *old_pri_state =
+		drm_atomic_get_existing_plane_state(old_state, primary);
+	bool modeset = needs_modeset(&pipe_config->base);
 
-	if (atomic->disable_fbc)
-		intel_fbc_deactivate(crtc);
+	if (atomic->update_fbc)
+		intel_fbc_pre_update(crtc);
 
-	if (crtc->atomic.disable_ips)
-		hsw_disable_ips(crtc);
+	if (old_pri_state) {
+		struct intel_plane_state *primary_state =
+			to_intel_plane_state(primary->state);
+		struct intel_plane_state *old_primary_state =
+			to_intel_plane_state(old_pri_state);
 
-	if (atomic->pre_disable_primary)
-		intel_pre_disable_primary(&crtc->base);
+		if (old_primary_state->visible &&
+		    (modeset || !primary_state->visible))
+			intel_pre_disable_primary(&crtc->base);
+	}
 
 	if (pipe_config->disable_cxsr) {
 		crtc->wm.cxsr_allowed = false;
-		intel_set_memory_cxsr(dev_priv, false);
+
+		if (old_crtc_state->base.active)
+			intel_set_memory_cxsr(dev_priv, false);
 	}
 
 	if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
@@ -4948,8 +4941,6 @@
 	if (intel_crtc->config->has_pch_encoder)
 		intel_wait_for_vblank(dev, pipe);
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-
-	intel_fbc_enable(intel_crtc);
 }
 
 /* IPS only exists on ULT machines and is tied to pipe A. */
@@ -5062,8 +5053,6 @@
 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
 	}
-
-	intel_fbc_enable(intel_crtc);
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
@@ -5144,8 +5133,6 @@
 	}
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-
-	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5196,8 +5183,6 @@
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      true);
 	}
-
-	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5320,31 +5305,37 @@
 	}
 }
 
-static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
+static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
+					    struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	unsigned long mask;
-	enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
+	enum transcoder transcoder = crtc_state->cpu_transcoder;
 
-	if (!crtc->state->active)
+	if (!crtc_state->base.active)
 		return 0;
 
 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
-	if (intel_crtc->config->pch_pfit.enabled ||
-	    intel_crtc->config->pch_pfit.force_thru)
+	if (crtc_state->pch_pfit.enabled ||
+	    crtc_state->pch_pfit.force_thru)
 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 
-	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
+		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
 		mask |= BIT(intel_display_port_power_domain(intel_encoder));
+	}
 
 	return mask;
 }
 
-static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+static unsigned long
+modeset_get_crtc_power_domains(struct drm_crtc *crtc,
+			       struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5352,7 +5343,8 @@
 	unsigned long domains, new_domains, old_domains;
 
 	old_domains = intel_crtc->enabled_power_domains;
-	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+	intel_crtc->enabled_power_domains = new_domains =
+		get_crtc_power_domains(crtc, crtc_state);
 
 	domains = new_domains & ~old_domains;
 
@@ -5371,34 +5363,6 @@
 		intel_display_power_put(dev_priv, domain);
 }
 
-static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
-{
-	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long put_domains[I915_MAX_PIPES] = {};
-	struct drm_crtc_state *crtc_state;
-	struct drm_crtc *crtc;
-	int i;
-
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (needs_modeset(crtc->state))
-			put_domains[to_intel_crtc(crtc)->pipe] =
-				modeset_get_crtc_power_domains(crtc);
-	}
-
-	if (dev_priv->display.modeset_commit_cdclk) {
-		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
-
-		if (cdclk != dev_priv->cdclk_freq &&
-		    !WARN_ON(!state->allow_modeset))
-			dev_priv->display.modeset_commit_cdclk(state);
-	}
-
-	for (i = 0; i < I915_MAX_PIPES; i++)
-		if (put_domains[i])
-			modeset_put_power_domains(dev_priv, put_domains[i]);
-}
-
 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 {
 	int max_cdclk_freq = dev_priv->max_cdclk_freq;
@@ -6061,27 +6025,32 @@
 		return 144000;
 }
 
-/* Compute the max pixel clock for new configuration. Uses atomic state if
- * that's non-NULL, look at current state otherwise. */
+/* Compute the max pixel clock for new configuration. */
 static int intel_mode_max_pixclk(struct drm_device *dev,
 				 struct drm_atomic_state *state)
 {
-	struct intel_crtc *intel_crtc;
-	struct intel_crtc_state *crtc_state;
-	int max_pixclk = 0;
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	unsigned max_pixclk = 0, i;
+	enum pipe pipe;
 
-	for_each_intel_crtc(dev, intel_crtc) {
-		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
+	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+	       sizeof(intel_state->min_pixclk));
 
-		if (!crtc_state->base.enable)
-			continue;
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		int pixclk = 0;
 
-		max_pixclk = max(max_pixclk,
-				 crtc_state->base.adjusted_mode.crtc_clock);
+		if (crtc_state->enable)
+			pixclk = crtc_state->adjusted_mode.crtc_clock;
+
+		intel_state->min_pixclk[i] = pixclk;
 	}
 
+	for_each_pipe(dev_priv, pipe)
+		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
+
 	return max_pixclk;
 }
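
Note: the rewritten computation above no longer pulls every CRTC's state into the transaction; it starts from the cached per-pipe min_pixclk[] values, overwrites only the pipes touched by this commit, and takes the maximum across all pipes. A standalone restatement with example clock values in kHz:

    /*
     * Standalone restatement of the max-pixclk computation above; the
     * pipe count and clock values are examples.
     */
    #include <stdio.h>

    #define NPIPES 3

    int main(void)
    {
    	unsigned cached[NPIPES] = { 148500, 0, 297000 }; /* committed values */
    	unsigned state[NPIPES];
    	unsigned max = 0;
    	int i;

    	for (i = 0; i < NPIPES; i++)
    		state[i] = cached[i];	/* start from dev_priv's copy */
    	state[1] = 533250;		/* pipe B enabled in this commit */

    	for (i = 0; i < NPIPES; i++)
    		if (state[i] > max)
    			max = state[i];
    	printf("max pixclk = %u kHz\n", max);	/* 533250 */
    	return 0;
    }
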
 
@@ -6090,13 +6059,18 @@
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int max_pixclk = intel_mode_max_pixclk(dev, state);
+	struct intel_atomic_state *intel_state =
+		to_intel_atomic_state(state);
 
 	if (max_pixclk < 0)
 		return max_pixclk;
 
-	to_intel_atomic_state(state)->cdclk =
+	intel_state->cdclk = intel_state->dev_cdclk =
 		valleyview_calc_cdclk(dev_priv, max_pixclk);
 
+	if (!intel_state->active_crtcs)
+		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
+
 	return 0;
 }
 
@@ -6105,13 +6079,18 @@
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int max_pixclk = intel_mode_max_pixclk(dev, state);
+	struct intel_atomic_state *intel_state =
+		to_intel_atomic_state(state);
 
 	if (max_pixclk < 0)
 		return max_pixclk;
 
-	to_intel_atomic_state(state)->cdclk =
+	intel_state->cdclk = intel_state->dev_cdclk =
 		broxton_calc_cdclk(dev_priv, max_pixclk);
 
+	if (!intel_state->active_crtcs)
+		intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+
 	return 0;
 }
 
@@ -6154,8 +6133,10 @@
 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
-	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_atomic_state *old_intel_state =
+		to_intel_atomic_state(old_state);
+	unsigned req_cdclk = old_intel_state->dev_cdclk;
 
 	/*
 	 * FIXME: We can end up here with all power domains off, yet
@@ -6290,8 +6271,6 @@
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->enable(encoder);
-
-	intel_fbc_enable(intel_crtc);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6354,8 +6333,6 @@
 
 	if (!IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
-	intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@@ -6379,6 +6356,7 @@
 
 	dev_priv->display.crtc_disable(crtc);
 	intel_crtc->active = false;
+	intel_fbc_disable(intel_crtc);
 	intel_update_watermarks(crtc);
 	intel_disable_shared_dpll(intel_crtc);
 
@@ -6386,6 +6364,9 @@
 	for_each_power_domain(domain, domains)
 		intel_display_power_put(dev_priv, domain);
 	intel_crtc->enabled_power_domains = 0;
+
+	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
+	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
 }
 
 /*
@@ -6394,55 +6375,16 @@
  */
 int intel_display_suspend(struct drm_device *dev)
 {
-	struct drm_mode_config *config = &dev->mode_config;
-	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_atomic_state *state;
-	struct drm_crtc *crtc;
-	unsigned crtc_mask = 0;
-	int ret = 0;
+	int ret;
 
-	if (WARN_ON(!ctx))
-		return 0;
-
-	lockdep_assert_held(&ctx->ww_ctx);
-	state = drm_atomic_state_alloc(dev);
-	if (WARN_ON(!state))
-		return -ENOMEM;
-
-	state->acquire_ctx = ctx;
-	state->allow_modeset = true;
-
-	for_each_crtc(dev, crtc) {
-		struct drm_crtc_state *crtc_state =
-			drm_atomic_get_crtc_state(state, crtc);
-
-		ret = PTR_ERR_OR_ZERO(crtc_state);
-		if (ret)
-			goto free;
-
-		if (!crtc_state->active)
-			continue;
-
-		crtc_state->active = false;
-		crtc_mask |= 1 << drm_crtc_index(crtc);
-	}
-
-	if (crtc_mask) {
-		ret = drm_atomic_commit(state);
-
-		if (!ret) {
-			for_each_crtc(dev, crtc)
-				if (crtc_mask & (1 << drm_crtc_index(crtc)))
-					crtc->state->active = true;
-
-			return ret;
-		}
-	}
-
-free:
+	state = drm_atomic_helper_suspend(dev);
+	ret = PTR_ERR_OR_ZERO(state);
 	if (ret)
 		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
-	drm_atomic_state_free(state);
+	else
+		dev_priv->modeset_restore_state = state;
 	return ret;
 }
 
@@ -7596,26 +7538,34 @@
  * in cases where we need the PLL enabled even when @pipe is not going to
  * be enabled.
  */
-void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
-		      const struct dpll *dpll)
+int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+		     const struct dpll *dpll)
 {
 	struct intel_crtc *crtc =
 		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
-	struct intel_crtc_state pipe_config = {
-		.base.crtc = &crtc->base,
-		.pixel_multiplier = 1,
-		.dpll = *dpll,
-	};
+	struct intel_crtc_state *pipe_config;
+
+	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+	if (!pipe_config)
+		return -ENOMEM;
+
+	pipe_config->base.crtc = &crtc->base;
+	pipe_config->pixel_multiplier = 1;
+	pipe_config->dpll = *dpll;
 
 	if (IS_CHERRYVIEW(dev)) {
-		chv_compute_dpll(crtc, &pipe_config);
-		chv_prepare_pll(crtc, &pipe_config);
-		chv_enable_pll(crtc, &pipe_config);
+		chv_compute_dpll(crtc, pipe_config);
+		chv_prepare_pll(crtc, pipe_config);
+		chv_enable_pll(crtc, pipe_config);
 	} else {
-		vlv_compute_dpll(crtc, &pipe_config);
-		vlv_prepare_pll(crtc, &pipe_config);
-		vlv_enable_pll(crtc, &pipe_config);
+		vlv_compute_dpll(crtc, pipe_config);
+		vlv_prepare_pll(crtc, pipe_config);
+		vlv_enable_pll(crtc, pipe_config);
 	}
+
+	kfree(pipe_config);
+
+	return 0;
 }
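
Note: intel_crtc_state has grown too large to sit on the stack, so the hunk above moves it to the heap and changes vlv_force_pll_on() from void to int so allocation failure can be reported. A userspace analogue of the allocate/use/free pattern (kzalloc with GFP_KERNEL becomes calloc here):

    /*
     * Userspace analogue of the stack-to-heap change above; the struct
     * size and the PLL work are placeholders.
     */
    #include <stdlib.h>

    struct big_state { char blob[4096]; };	/* too big for the stack */

    static int force_pll_on(void)
    {
    	struct big_state *s = calloc(1, sizeof(*s));

    	if (!s)
    		return -12;	/* -ENOMEM, as the kernel path returns */

    	/* ... program the PLL from *s ... */

    	free(s);
    	return 0;
    }

    int main(void)
    {
    	return force_pll_on() ? 1 : 0;
    }
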
 
 /**
@@ -9258,7 +9208,7 @@
 	fb->width = ((val >> 0) & 0x1fff) + 1;
 
 	val = I915_READ(PLANE_STRIDE(pipe, 0));
-	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
 						fb->pixel_format);
 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
@@ -9682,14 +9632,14 @@
 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
 	}
-
-	intel_prepare_ddi(dev);
 }
 
 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
-	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+	struct intel_atomic_state *old_intel_state =
+		to_intel_atomic_state(old_state);
+	unsigned int req_cdclk = old_intel_state->dev_cdclk;
 
 	broxton_set_cdclk(dev, req_cdclk);
 }
@@ -9697,29 +9647,38 @@
 /* compute the max rate for new configuration */
 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 {
-	struct intel_crtc *intel_crtc;
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_i915_private *dev_priv = state->dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *cstate;
 	struct intel_crtc_state *crtc_state;
-	int max_pixel_rate = 0;
+	unsigned max_pixel_rate = 0, i;
+	enum pipe pipe;
 
-	for_each_intel_crtc(state->dev, intel_crtc) {
+	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+	       sizeof(intel_state->min_pixclk));
+
+	for_each_crtc_in_state(state, crtc, cstate, i) {
 		int pixel_rate;
 
-		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-
-		if (!crtc_state->base.enable)
+		crtc_state = to_intel_crtc_state(cstate);
+		if (!crtc_state->base.enable) {
+			intel_state->min_pixclk[i] = 0;
 			continue;
+		}
 
 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
 
 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
 			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
 
-		max_pixel_rate = max(max_pixel_rate, pixel_rate);
+		intel_state->min_pixclk[i] = pixel_rate;
 	}
 
+	for_each_pipe(dev_priv, pipe)
+		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
+
 	return max_pixel_rate;
 }
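
ilk_max_pixel_rate() now records a minimum pixel clock per pipe in the atomic state (so pipes untouched by this commit keep their previous values) and derives the required cdclk from the maximum across all pipes. A standalone model of that bookkeeping, with invented pipe clocks; only the 95% IPS headroom rule is taken from the diff:

    #include <stdio.h>

    #define MAX_PIPES 3

    static unsigned int max_pixclk(const unsigned int min_pixclk[MAX_PIPES])
    {
        unsigned int max = 0;

        for (int pipe = 0; pipe < MAX_PIPES; pipe++)
            if (min_pixclk[pipe] > max)
                max = min_pixclk[pipe];
        return max;
    }

    int main(void)
    {
        unsigned int pixclk[MAX_PIPES] = { 148500, 0, 297000 }; /* kHz, invented */

        /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
        pixclk[0] = (pixclk[0] * 100 + 94) / 95;  /* DIV_ROUND_UP(p * 100, 95) */

        printf("required cdclk >= %u kHz\n", max_pixclk(pixclk));
        return 0;
    }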
 
@@ -9803,6 +9762,7 @@
 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	int max_pixclk = ilk_max_pixel_rate(state);
 	int cdclk;
 
@@ -9825,7 +9785,9 @@
 		return -EINVAL;
 	}
 
-	to_intel_atomic_state(state)->cdclk = cdclk;
+	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+	if (!intel_state->active_crtcs)
+		intel_state->dev_cdclk = 337500;
 
 	return 0;
 }
@@ -9833,7 +9795,9 @@
 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
-	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+	struct intel_atomic_state *old_intel_state =
+		to_intel_atomic_state(old_state);
+	unsigned req_cdclk = old_intel_state->dev_cdclk;
 
 	broadwell_set_cdclk(dev, req_cdclk);
 }
@@ -9841,8 +9805,13 @@
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 				      struct intel_crtc_state *crtc_state)
 {
-	if (!intel_ddi_pll_select(crtc, crtc_state))
-		return -EINVAL;
+	struct intel_encoder *intel_encoder =
+		intel_ddi_get_crtc_new_encoder(crtc_state);
+
+	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
+		if (!intel_ddi_pll_select(crtc, crtc_state))
+			return -EINVAL;
+	}
 
 	crtc->lowfreq_avail = false;
 
@@ -10058,16 +10027,17 @@
 	return ret;
 }
 
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
+			       const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t cntl = 0, size = 0;
 
-	if (on) {
-		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
-		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
+	if (plane_state && plane_state->visible) {
+		unsigned int width = plane_state->base.crtc_w;
+		unsigned int height = plane_state->base.crtc_h;
 		unsigned int stride = roundup_pow_of_two(width) * 4;
 
 		switch (stride) {
@@ -10120,7 +10090,8 @@
 	}
 }
 
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
+			       const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10128,9 +10099,9 @@
 	int pipe = intel_crtc->pipe;
 	uint32_t cntl = 0;
 
-	if (on) {
+	if (plane_state && plane_state->visible) {
 		cntl = MCURSOR_GAMMA_ENABLE;
-		switch (intel_crtc->base.cursor->state->crtc_w) {
+		switch (plane_state->base.crtc_w) {
 			case 64:
 				cntl |= CURSOR_MODE_64_ARGB_AX;
 				break;
@@ -10141,17 +10112,17 @@
 				cntl |= CURSOR_MODE_256_ARGB_AX;
 				break;
 			default:
-				MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
+				MISSING_CASE(plane_state->base.crtc_w);
 				return;
 		}
 		cntl |= pipe << 28; /* Connect to correct pipe */
 
 		if (HAS_DDI(dev))
 			cntl |= CURSOR_PIPE_CSC_ENABLE;
-	}
 
-	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
-		cntl |= CURSOR_ROTATE_180;
+		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
+			cntl |= CURSOR_ROTATE_180;
+	}
 
 	if (intel_crtc->cursor_cntl != cntl) {
 		I915_WRITE(CURCNTR(pipe), cntl);
@@ -10168,56 +10139,45 @@
 
 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
-				     bool on)
+				     const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	struct drm_plane_state *cursor_state = crtc->cursor->state;
-	int x = cursor_state->crtc_x;
-	int y = cursor_state->crtc_y;
-	u32 base = 0, pos = 0;
+	u32 base = intel_crtc->cursor_addr;
+	u32 pos = 0;
 
-	base = intel_crtc->cursor_addr;
+	if (plane_state) {
+		int x = plane_state->base.crtc_x;
+		int y = plane_state->base.crtc_y;
 
-	if (x >= intel_crtc->config->pipe_src_w)
-		on = false;
+		if (x < 0) {
+			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+			x = -x;
+		}
+		pos |= x << CURSOR_X_SHIFT;
 
-	if (y >= intel_crtc->config->pipe_src_h)
-		on = false;
+		if (y < 0) {
+			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+			y = -y;
+		}
+		pos |= y << CURSOR_Y_SHIFT;
 
-	if (x < 0) {
-		if (x + cursor_state->crtc_w <= 0)
-			on = false;
-
-		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
-		x = -x;
+		/* ILK+ do this automagically */
+		if (HAS_GMCH_DISPLAY(dev) &&
+		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+			base += (plane_state->base.crtc_h *
+				 plane_state->base.crtc_w - 1) * 4;
+		}
 	}
-	pos |= x << CURSOR_X_SHIFT;
-
-	if (y < 0) {
-		if (y + cursor_state->crtc_h <= 0)
-			on = false;
-
-		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
-		y = -y;
-	}
-	pos |= y << CURSOR_Y_SHIFT;
 
 	I915_WRITE(CURPOS(pipe), pos);
 
-	/* ILK+ do this automagically */
-	if (HAS_GMCH_DISPLAY(dev) &&
-	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
-		base += (cursor_state->crtc_h *
-			 cursor_state->crtc_w - 1) * 4;
-	}
-
 	if (IS_845G(dev) || IS_I865G(dev))
-		i845_update_cursor(crtc, base, on);
+		i845_update_cursor(crtc, base, plane_state);
 	else
-		i9xx_update_cursor(crtc, base, on);
+		i9xx_update_cursor(crtc, base, plane_state);
 }
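
The cursor path now derives everything from the plane state passed in, and CURPOS uses a sign/magnitude layout rather than two's complement. A standalone sketch of that encoding; the bit positions mirror the macros used above (CURSOR_POS_SIGN, CURSOR_X/Y_SHIFT), but the numeric values here are assumptions, not quoted from i915_reg.h:

    #include <stdint.h>
    #include <stdio.h>

    #define CURSOR_POS_SIGN  0x8000u  /* assumed value for illustration */
    #define CURSOR_X_SHIFT   0
    #define CURSOR_Y_SHIFT   16

    static uint32_t encode_curpos(int x, int y)
    {
        uint32_t pos = 0;

        if (x < 0) {
            pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;  /* sign bit, then magnitude */
            x = -x;
        }
        pos |= (uint32_t)x << CURSOR_X_SHIFT;

        if (y < 0) {
            pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
            y = -y;
        }
        pos |= (uint32_t)y << CURSOR_Y_SHIFT;

        return pos;
    }

    int main(void)
    {
        printf("CURPOS(-16, 8) = 0x%08x\n", encode_curpos(-16, 8));
        return 0;
    }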
 
 static bool cursor_size_ok(struct drm_device *dev,
@@ -10385,6 +10345,7 @@
 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
 		return NULL;
 
+	drm_framebuffer_reference(fb);
 	return fb;
 #else
 	return NULL;
@@ -10440,7 +10401,7 @@
 	struct drm_device *dev = encoder->dev;
 	struct drm_framebuffer *fb;
 	struct drm_mode_config *config = &dev->mode_config;
-	struct drm_atomic_state *state = NULL;
+	struct drm_atomic_state *state = NULL, *restore_state = NULL;
 	struct drm_connector_state *connector_state;
 	struct intel_crtc_state *crtc_state;
 	int ret, i = -1;
@@ -10449,6 +10410,8 @@
 		      connector->base.id, connector->name,
 		      encoder->base.id, encoder->name);
 
+	old->restore_state = NULL;
+
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
 	if (ret)
@@ -10465,24 +10428,15 @@
 	 */
 
 	/* See if we already have a CRTC for this connector */
-	if (encoder->crtc) {
-		crtc = encoder->crtc;
+	if (connector->state->crtc) {
+		crtc = connector->state->crtc;
 
 		ret = drm_modeset_lock(&crtc->mutex, ctx);
 		if (ret)
 			goto fail;
-		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
-		if (ret)
-			goto fail;
-
-		old->dpms_mode = connector->dpms;
-		old->load_detect_temp = false;
 
 		/* Make sure the crtc and connector are running */
-		if (connector->dpms != DRM_MODE_DPMS_ON)
-			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
-
-		return true;
+		goto found;
 	}
 
 	/* Find an unused one (if possible) */
@@ -10490,8 +10444,15 @@
 		i++;
 		if (!(encoder->possible_crtcs & (1 << i)))
 			continue;
-		if (possible_crtc->state->enable)
+
+		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
+		if (ret)
+			goto fail;
+
+		if (possible_crtc->state->enable) {
+			drm_modeset_unlock(&possible_crtc->mutex);
 			continue;
+		}
 
 		crtc = possible_crtc;
 		break;
@@ -10505,23 +10466,22 @@
 		goto fail;
 	}
 
-	ret = drm_modeset_lock(&crtc->mutex, ctx);
-	if (ret)
-		goto fail;
+found:
+	intel_crtc = to_intel_crtc(crtc);
+
 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
 	if (ret)
 		goto fail;
 
-	intel_crtc = to_intel_crtc(crtc);
-	old->dpms_mode = connector->dpms;
-	old->load_detect_temp = true;
-	old->release_fb = NULL;
-
 	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return false;
+	restore_state = drm_atomic_state_alloc(dev);
+	if (!state || !restore_state) {
+		ret = -ENOMEM;
+		goto fail;
+	}
 
 	state->acquire_ctx = ctx;
+	restore_state->acquire_ctx = ctx;
 
 	connector_state = drm_atomic_get_connector_state(state, connector);
 	if (IS_ERR(connector_state)) {
@@ -10529,8 +10489,9 @@
 		goto fail;
 	}
 
-	connector_state->crtc = crtc;
-	connector_state->best_encoder = &intel_encoder->base;
+	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
+	if (ret)
+		goto fail;
 
 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
 	if (IS_ERR(crtc_state)) {
@@ -10554,7 +10515,6 @@
 	if (fb == NULL) {
 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
 		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
-		old->release_fb = fb;
 	} else
 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
 	if (IS_ERR(fb)) {
@@ -10566,15 +10526,29 @@
 	if (ret)
 		goto fail;
 
-	drm_mode_copy(&crtc_state->base.mode, mode);
+	drm_framebuffer_unreference(fb);
 
-	if (drm_atomic_commit(state)) {
-		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
-		if (old->release_fb)
-			old->release_fb->funcs->destroy(old->release_fb);
+	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+	if (ret)
+		goto fail;
+
+	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
+	if (!ret)
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+	if (!ret)
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
+	if (ret) {
+		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
 		goto fail;
 	}
-	crtc->primary->crtc = crtc;
+
+	ret = drm_atomic_commit(state);
+	if (ret) {
+		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+		goto fail;
+	}
+
+	old->restore_state = restore_state;
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -10582,7 +10556,8 @@
 
 fail:
 	drm_atomic_state_free(state);
-	state = NULL;
+	drm_atomic_state_free(restore_state);
+	restore_state = state = NULL;
 
 	if (ret == -EDEADLK) {
 		drm_modeset_backoff(ctx);
@@ -10596,66 +10571,24 @@
 				    struct intel_load_detect_pipe *old,
 				    struct drm_modeset_acquire_ctx *ctx)
 {
-	struct drm_device *dev = connector->dev;
 	struct intel_encoder *intel_encoder =
 		intel_attached_encoder(connector);
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_atomic_state *state;
-	struct drm_connector_state *connector_state;
-	struct intel_crtc_state *crtc_state;
+	struct drm_atomic_state *state = old->restore_state;
 	int ret;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 		      connector->base.id, connector->name,
 		      encoder->base.id, encoder->name);
 
-	if (old->load_detect_temp) {
-		state = drm_atomic_state_alloc(dev);
-		if (!state)
-			goto fail;
-
-		state->acquire_ctx = ctx;
-
-		connector_state = drm_atomic_get_connector_state(state, connector);
-		if (IS_ERR(connector_state))
-			goto fail;
-
-		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-		if (IS_ERR(crtc_state))
-			goto fail;
-
-		connector_state->best_encoder = NULL;
-		connector_state->crtc = NULL;
-
-		crtc_state->base.enable = crtc_state->base.active = false;
-
-		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
-						      0, 0);
-		if (ret)
-			goto fail;
-
-		ret = drm_atomic_commit(state);
-		if (ret)
-			goto fail;
-
-		if (old->release_fb) {
-			drm_framebuffer_unregister_private(old->release_fb);
-			drm_framebuffer_unreference(old->release_fb);
-		}
-
+	if (!state)
 		return;
+
+	ret = drm_atomic_commit(state);
+	if (ret) {
+		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
+		drm_atomic_state_free(state);
 	}
-
-	/* Switch crtc and encoder back off if necessary */
-	if (old->dpms_mode != DRM_MODE_DPMS_ON)
-		connector->funcs->dpms(connector, old->dpms_mode);
-
-	return;
-fail:
-	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
-	drm_atomic_state_free(state);
 }
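
Note the new idiom in intel_get_load_detect_pipe() above: pulling the connector, CRTC, and primary plane into a second atomic state with the drm_atomic_get_*_state() helpers duplicates their current (pre-probe) settings, so committing that state later undoes the load-detect changes in one shot. Reduced to its core as a hedged sketch (unrolled error paths omitted, helper name invented):

    #include <drm/drm_atomic.h>

    static struct drm_atomic_state *
    snapshot_for_restore(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx,
                         struct drm_connector *connector, struct drm_crtc *crtc)
    {
        struct drm_atomic_state *restore = drm_atomic_state_alloc(dev);

        if (!restore)
            return NULL;
        restore->acquire_ctx = ctx;

        /* Each get_*_state() call copies the object's current state. */
        if (IS_ERR(drm_atomic_get_connector_state(restore, connector)) ||
            IS_ERR(drm_atomic_get_crtc_state(restore, crtc)) ||
            IS_ERR(drm_atomic_get_plane_state(restore, crtc->primary))) {
            drm_atomic_state_free(restore);
            return NULL;
        }

        return restore;  /* drm_atomic_commit(restore) reverts the probe */
    }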
 
 static int i9xx_pll_refclk(struct drm_device *dev,
@@ -10810,7 +10743,7 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 	struct drm_display_mode *mode;
-	struct intel_crtc_state pipe_config;
+	struct intel_crtc_state *pipe_config;
 	int htot = I915_READ(HTOTAL(cpu_transcoder));
 	int hsync = I915_READ(HSYNC(cpu_transcoder));
 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -10821,6 +10754,12 @@
 	if (!mode)
 		return NULL;
 
+	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+	if (!pipe_config) {
+		kfree(mode);
+		return NULL;
+	}
+
 	/*
 	 * Construct a pipe_config sufficient for getting the clock info
 	 * back out of crtc_clock_get.
@@ -10828,14 +10767,14 @@
 	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
 	 * to use a real value here instead.
 	 */
-	pipe_config.cpu_transcoder = (enum transcoder) pipe;
-	pipe_config.pixel_multiplier = 1;
-	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
-	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
-	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
-	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+	pipe_config->cpu_transcoder = (enum transcoder) pipe;
+	pipe_config->pixel_multiplier = 1;
+	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
+	i9xx_crtc_clock_get(intel_crtc, pipe_config);
 
-	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
+	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
 	mode->hdisplay = (htot & 0xffff) + 1;
 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
 	mode->hsync_start = (hsync & 0xffff) + 1;
@@ -10847,6 +10786,8 @@
 
 	drm_mode_set_name(mode);
 
+	kfree(pipe_config);
+
 	return mode;
 }
 
@@ -10917,6 +10858,7 @@
 	mutex_unlock(&dev->struct_mutex);
 
 	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+	intel_fbc_post_update(crtc);
 	drm_framebuffer_unreference(work->old_fb);
 
 	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
@@ -10998,6 +10940,12 @@
 		return true;
 
 	/*
+	 * BDW signals flip done immediately if the plane
+	 * is disabled, even if the plane enable is already
+	 * armed to occur at the next vblank :(
+	 */
+
+	/*
 	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
 	 * used the same base address. In that case the mmio flip might
 	 * have completed, but the CS hasn't even executed the flip yet.
@@ -11351,13 +11299,12 @@
 	 */
 	if (intel_rotation_90_or_270(rotation)) {
 		/* stride = Surface height in tiles */
-		tile_height = intel_tile_height(dev, fb->pixel_format,
-						fb->modifier[0], 0);
+		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
 		stride = DIV_ROUND_UP(fb->height, tile_height);
 	} else {
 		stride = fb->pitches[0] /
-				intel_fb_stride_alignment(dev, fb->modifier[0],
-							  fb->pixel_format);
+			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+						  fb->pixel_format);
 	}
 
 	/*
@@ -11633,6 +11580,7 @@
 
 	crtc->primary->fb = fb;
 	update_state_fb(crtc->primary);
+	intel_fbc_pre_update(intel_crtc);
 
 	work->pending_flip_obj = obj;
 
@@ -11692,9 +11640,11 @@
 					obj->last_write_req);
 	} else {
 		if (!request) {
-			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
-			if (ret)
+			request = i915_gem_request_alloc(ring, NULL);
+			if (IS_ERR(request)) {
+				ret = PTR_ERR(request);
 				goto cleanup_unpin;
+			}
 		}
 
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
@@ -11715,7 +11665,6 @@
 			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_fbc_deactivate(intel_crtc);
 	intel_frontbuffer_flip_prepare(dev,
 				       to_intel_plane(primary)->frontbuffer_bit);
 
@@ -11726,7 +11675,7 @@
 cleanup_unpin:
 	intel_unpin_fb_obj(fb, crtc->primary->state);
 cleanup_pending:
-	if (request)
+	if (!IS_ERR_OR_NULL(request))
 		i915_gem_request_cancel(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
@@ -11837,11 +11786,9 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_plane *plane = plane_state->plane;
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane_state *old_plane_state =
 		to_intel_plane_state(plane->state);
 	int idx = intel_crtc->base.base.id, ret;
-	int i = drm_plane_index(plane);
 	bool mode_changed = needs_modeset(crtc_state);
 	bool was_crtc_enabled = crtc->state->active;
 	bool is_crtc_enabled = crtc_state->active;
@@ -11863,12 +11810,20 @@
 	if (!was_crtc_enabled && WARN_ON(was_visible))
 		was_visible = false;
 
-	if (!is_crtc_enabled && WARN_ON(visible))
-		visible = false;
+	/*
+	 * Visibility is calculated as if the crtc were on, but
+	 * after scaler setup everything depends on it being off
+	 * when the crtc isn't active.
+	 */
+	if (!is_crtc_enabled)
+		to_intel_plane_state(plane_state)->visible = visible = false;
 
 	if (!was_visible && !visible)
 		return 0;
 
+	if (fb != old_plane_state->base.fb)
+		pipe_config->fb_changed = true;
+
 	turn_off = was_visible && (!visible || mode_changed);
 	turn_on = visible && (!was_visible || mode_changed);
 
@@ -11883,11 +11838,8 @@
 		pipe_config->wm_changed = true;
 
 		/* must disable cxsr around plane enable/disable */
-		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
-			if (is_crtc_enabled)
-				intel_crtc->atomic.wait_vblank = true;
+		if (plane->type != DRM_PLANE_TYPE_CURSOR)
 			pipe_config->disable_cxsr = true;
-		}
 	} else if (intel_wm_need_update(plane, plane_state)) {
 		pipe_config->wm_changed = true;
 	}
@@ -11898,49 +11850,9 @@
 
 	switch (plane->type) {
 	case DRM_PLANE_TYPE_PRIMARY:
-		intel_crtc->atomic.pre_disable_primary = turn_off;
 		intel_crtc->atomic.post_enable_primary = turn_on;
+		intel_crtc->atomic.update_fbc = true;
 
-		if (turn_off) {
-			/*
-			 * FIXME: Actually if we will still have any other
-			 * plane enabled on the pipe we could let IPS enabled
-			 * still, but for now lets consider that when we make
-			 * primary invisible by setting DSPCNTR to 0 on
-			 * update_primary_plane function IPS needs to be
-			 * disable.
-			 */
-			intel_crtc->atomic.disable_ips = true;
-
-			intel_crtc->atomic.disable_fbc = true;
-		}
-
-		/*
-		 * FBC does not work on some platforms for rotated
-		 * planes, so disable it when rotation is not 0 and
-		 * update it when rotation is set back to 0.
-		 *
-		 * FIXME: This is redundant with the fbc update done in
-		 * the primary plane enable function except that that
-		 * one is done too late. We eventually need to unify
-		 * this.
-		 */
-
-		if (visible &&
-		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-		    dev_priv->fbc.crtc == intel_crtc &&
-		    plane_state->rotation != BIT(DRM_ROTATE_0))
-			intel_crtc->atomic.disable_fbc = true;
-
-		/*
-		 * BDW signals flip done immediately if the plane
-		 * is disabled, even if the plane enable is already
-		 * armed to occur at the next vblank :(
-		 */
-		if (turn_on && IS_BROADWELL(dev))
-			intel_crtc->atomic.wait_vblank = true;
-
-		intel_crtc->atomic.update_fbc |= visible || mode_changed;
 		break;
 	case DRM_PLANE_TYPE_CURSOR:
 		break;
@@ -11953,13 +11865,8 @@
 		 */
 		if (IS_IVYBRIDGE(dev) &&
 		    needs_scaling(to_intel_plane_state(plane_state)) &&
-		    !needs_scaling(old_plane_state)) {
-			to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
-		} else if (turn_off && !mode_changed) {
-			intel_crtc->atomic.wait_vblank = true;
-			intel_crtc->atomic.update_sprite_watermarks |=
-				1 << i;
-		}
+		    !needs_scaling(old_plane_state))
+			pipe_config->disable_lp_wm = true;
 
 		break;
 	}
@@ -12561,19 +12468,22 @@
 
 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
 
-	if (m > m2) {
-		while (m > m2) {
+	if (n > n2) {
+		while (n > n2) {
 			m2 <<= 1;
 			n2 <<= 1;
 		}
-	} else if (m < m2) {
-		while (m < m2) {
+	} else if (n < n2) {
+		while (n < n2) {
 			m <<= 1;
 			n <<= 1;
 		}
 	}
 
-	return m == m2 && n == n2;
+	if (n != n2)
+		return false;
+
+	return intel_fuzzy_clock_check(m, m2);
 }
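
The comparison now normalizes on n rather than m before testing: both link m/n fractions are scaled up by doubling until the denominators meet, the denominators must then match exactly, and the numerators only need to agree within clock tolerance. A standalone model of that check; the ~5% tolerance is an assumption standing in for intel_fuzzy_clock_check():

    #include <stdbool.h>
    #include <stdio.h>

    static bool fuzzy_clock_check(int a, int b)
    {
        int diff = a > b ? a - b : b - a;

        return diff * 20 <= a;  /* within ~5%; illustrative threshold */
    }

    static bool m_n_equalish(int m, int n, int m2, int n2)
    {
        /* scale the smaller denominator up by doubling, as in the diff */
        while (n > n2) { m2 <<= 1; n2 <<= 1; }
        while (n < n2) { m <<= 1; n <<= 1; }

        return n == n2 && fuzzy_clock_check(m, m2);
    }

    int main(void)
    {
        /* same ratio at different scales: 123456/0x800000 vs 61728/0x400000 */
        printf("%s\n", m_n_equalish(123456, 0x800000, 61728, 0x400000) ?
               "match" : "mismatch");
        return 0;
    }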
 
 static bool
@@ -13124,8 +13034,6 @@
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_shared_dpll_config *shared_dpll = NULL;
-	struct intel_crtc *intel_crtc;
-	struct intel_crtc_state *intel_crtc_state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 	int i;
@@ -13134,21 +13042,21 @@
 		return;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		int dpll;
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
 
-		intel_crtc = to_intel_crtc(crtc);
-		intel_crtc_state = to_intel_crtc_state(crtc_state);
-		dpll = intel_crtc_state->shared_dpll;
-
-		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
+		if (!needs_modeset(crtc_state))
 			continue;
 
-		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
+		to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
+
+		if (old_dpll == DPLL_ID_PRIVATE)
+			continue;
 
 		if (!shared_dpll)
 			shared_dpll = intel_atomic_get_shared_dpll_state(state);
 
-		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+		shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
 	}
 }
 
@@ -13248,15 +13156,27 @@
 
 static int intel_modeset_checks(struct drm_atomic_state *state)
 {
-	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_i915_private *dev_priv = state->dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0, i;
 
 	if (!check_digital_port_conflicts(state)) {
 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
 		return -EINVAL;
 	}
 
+	intel_state->modeset = true;
+	intel_state->active_crtcs = dev_priv->active_crtcs;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (crtc_state->active)
+			intel_state->active_crtcs |= 1 << i;
+		else
+			intel_state->active_crtcs &= ~(1 << i);
+	}
+
 	/*
 	 * See if the config requires any additional preparation, e.g.
 	 * to adjust global state with pipes off.  We need to do this
@@ -13265,22 +13185,22 @@
 	 * adjusted_mode bits in the crtc directly.
 	 */
 	if (dev_priv->display.modeset_calc_cdclk) {
-		unsigned int cdclk;
-
 		ret = dev_priv->display.modeset_calc_cdclk(state);
 
-		cdclk = to_intel_atomic_state(state)->cdclk;
-		if (!ret && cdclk != dev_priv->cdclk_freq)
+		if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
 			ret = intel_modeset_all_pipes(state);
 
 		if (ret < 0)
 			return ret;
+
+		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
+			      intel_state->cdclk, intel_state->dev_cdclk);
 	} else
-		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
+		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
 
 	intel_modeset_clear_plls(state);
 
-	if (IS_HASWELL(dev))
+	if (IS_HASWELL(dev_priv))
 		return haswell_mode_set_planes_workaround(state);
 
 	return 0;
@@ -13333,6 +13253,7 @@
 static int intel_atomic_check(struct drm_device *dev,
 			      struct drm_atomic_state *state)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
@@ -13375,7 +13296,7 @@
 			return ret;
 
 		if (i915.fastboot &&
-		    intel_pipe_config_compare(state->dev,
+		    intel_pipe_config_compare(dev,
 					to_intel_crtc_state(crtc->state),
 					pipe_config, true)) {
 			crtc_state->mode_changed = false;
@@ -13401,12 +13322,13 @@
 		if (ret)
 			return ret;
 	} else
-		intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
+		intel_state->cdclk = dev_priv->cdclk_freq;
 
-	ret = drm_atomic_helper_check_planes(state->dev, state);
+	ret = drm_atomic_helper_check_planes(dev, state);
 	if (ret)
 		return ret;
 
+	intel_fbc_choose_crtc(dev_priv, state);
 	calc_watermark_data(state);
 
 	return 0;
@@ -13478,6 +13400,71 @@
 	return ret;
 }
 
+static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
+					  struct drm_i915_private *dev_priv,
+					  unsigned crtc_mask)
+{
+	unsigned last_vblank_count[I915_MAX_PIPES];
+	enum pipe pipe;
+	int ret;
+
+	if (!crtc_mask)
+		return;
+
+	for_each_pipe(dev_priv, pipe) {
+		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+		if (!((1 << pipe) & crtc_mask))
+			continue;
+
+		ret = drm_crtc_vblank_get(crtc);
+		if (WARN_ON(ret != 0)) {
+			crtc_mask &= ~(1 << pipe);
+			continue;
+		}
+
+		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
+	}
+
+	for_each_pipe(dev_priv, pipe) {
+		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+		long lret;
+
+		if (!((1 << pipe) & crtc_mask))
+			continue;
+
+		lret = wait_event_timeout(dev->vblank[pipe].queue,
+				last_vblank_count[pipe] !=
+					drm_crtc_vblank_count(crtc),
+				msecs_to_jiffies(50));
+
+		WARN_ON(!lret);
+
+		drm_crtc_vblank_put(crtc);
+	}
+}
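
The helper above works in two passes: first sample every relevant pipe's vblank counter under a vblank reference, then sleep until each counter moves past its sample (or 50 ms elapses). A standalone model of that sample-then-wait shape, with a plain check standing in for wait_event_timeout() and invented values throughout:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_PIPES 3

    static unsigned int vblank_count[MAX_PIPES];

    int main(void)
    {
        unsigned int sampled[MAX_PIPES] = { 0 };
        unsigned int crtc_mask = (1u << 0) | (1u << 2);  /* pipes A and C */

        /* pass 1: record the current count for every pipe in the mask */
        for (int pipe = 0; pipe < MAX_PIPES; pipe++)
            if (crtc_mask & (1u << pipe))
                sampled[pipe] = vblank_count[pipe];

        vblank_count[0]++;  /* pretend the vblank IRQs fired... */
        vblank_count[2]++;

        /* pass 2: a real driver would block here with a 50 ms timeout */
        for (int pipe = 0; pipe < MAX_PIPES; pipe++) {
            if (!(crtc_mask & (1u << pipe)))
                continue;
            bool seen = vblank_count[pipe] != sampled[pipe];
            printf("pipe %c: %s\n", 'A' + pipe, seen ? "vblank seen" : "timed out");
        }
        return 0;
    }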
+
+static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
+{
+	/* fb updated, need to unpin old fb */
+	if (crtc_state->fb_changed)
+		return true;
+
+	/* wm changes, need vblank before final wms */
+	if (crtc_state->wm_changed)
+		return true;
+
+	/*
+	 * cxsr is re-enabled after vblank.
+	 * This is already handled by crtc_state->wm_changed,
+	 * but added for clarity.
+	 */
+	if (crtc_state->disable_cxsr)
+		return true;
+
+	return false;
+}
+
 /**
  * intel_atomic_commit - commit validated state object
  * @dev: DRM device
@@ -13498,12 +13485,14 @@
 			       struct drm_atomic_state *state,
 			       bool async)
 {
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc_state *crtc_state;
 	struct drm_crtc *crtc;
-	int ret = 0;
-	int i;
-	bool any_ms = false;
+	int ret = 0, i;
+	bool hw_check = intel_state->modeset;
+	unsigned long put_domains[I915_MAX_PIPES] = {};
+	unsigned crtc_vblank_mask = 0;
 
 	ret = intel_atomic_prepare_commit(dev, state, async);
 	if (ret) {
@@ -13514,19 +13503,37 @@
 	drm_atomic_helper_swap_state(dev, state);
 	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
 
+	if (intel_state->modeset) {
+		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+		       sizeof(intel_state->min_pixclk));
+		dev_priv->active_crtcs = intel_state->active_crtcs;
+		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+
+		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+	}
+
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
+		if (needs_modeset(crtc->state) ||
+		    to_intel_crtc_state(crtc->state)->update_pipe) {
+			hw_check = true;
+
+			put_domains[to_intel_crtc(crtc)->pipe] =
+				modeset_get_crtc_power_domains(crtc,
+					to_intel_crtc_state(crtc->state));
+		}
+
 		if (!needs_modeset(crtc->state))
 			continue;
 
-		any_ms = true;
-		intel_pre_plane_update(intel_crtc);
+		intel_pre_plane_update(to_intel_crtc_state(crtc_state));
 
 		if (crtc_state->active) {
 			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
 			dev_priv->display.crtc_disable(crtc);
 			intel_crtc->active = false;
+			intel_fbc_disable(intel_crtc);
 			intel_disable_shared_dpll(intel_crtc);
 
 			/*
@@ -13545,65 +13552,80 @@
 	 * update the output configuration. */
 	intel_modeset_update_crtc_state(state);
 
-	if (any_ms) {
+	if (intel_state->modeset) {
 		intel_shared_dpll_commit(state);
 
 		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
-		modeset_update_crtc_power_domains(state);
+
+		if (dev_priv->display.modeset_commit_cdclk &&
+		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
+			dev_priv->display.modeset_commit_cdclk(state);
 	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		bool modeset = needs_modeset(crtc->state);
-		bool update_pipe = !modeset &&
-			to_intel_crtc_state(crtc->state)->update_pipe;
-		unsigned long put_domains = 0;
-
-		if (modeset)
-			intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+		struct intel_crtc_state *pipe_config =
+			to_intel_crtc_state(crtc->state);
+		bool update_pipe = !modeset && pipe_config->update_pipe;
 
 		if (modeset && crtc->state->active) {
 			update_scanline_offset(to_intel_crtc(crtc));
 			dev_priv->display.crtc_enable(crtc);
 		}
 
-		if (update_pipe) {
-			put_domains = modeset_get_crtc_power_domains(crtc);
-
-			/* make sure intel_modeset_check_state runs */
-			any_ms = true;
-		}
-
 		if (!modeset)
-			intel_pre_plane_update(intel_crtc);
+			intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+
+		if (crtc->state->active && intel_crtc->atomic.update_fbc)
+			intel_fbc_enable(intel_crtc);
 
 		if (crtc->state->active &&
 		    (crtc->state->planes_changed || update_pipe))
 			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
 
-		if (put_domains)
-			modeset_put_power_domains(dev_priv, put_domains);
-
-		intel_post_plane_update(intel_crtc);
-
-		if (modeset)
-			intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
+		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
+			crtc_vblank_mask |= 1 << i;
 	}
 
 	/* FIXME: add subpixel order */
 
-	drm_atomic_helper_wait_for_vblanks(dev, state);
+	if (!state->legacy_cursor_update)
+		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		intel_post_plane_update(to_intel_crtc(crtc));
+
+		if (put_domains[i])
+			modeset_put_power_domains(dev_priv, put_domains[i]);
+	}
+
+	if (intel_state->modeset)
+		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
 
 	mutex_lock(&dev->struct_mutex);
 	drm_atomic_helper_cleanup_planes(dev, state);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (any_ms)
+	if (hw_check)
 		intel_modeset_check_state(dev, state);
 
 	drm_atomic_state_free(state);
 
+	/* As one of the primary mmio accessors, KMS has a high likelihood
+	 * of triggering bugs in unclaimed access. After we finish
+	 * modesetting, see if an error has been flagged, and if so
+	 * enable debugging for the next modeset - and hope we catch
+	 * the culprit.
+	 *
+	 * XXX note that we assume display power is on at this point.
+	 * This might hold true now, but we need to add a PM helper to check
+	 * unclaimed access only when the hardware is on, as atomic commits
+	 * can also happen when the device is completely off.
+	 */
+	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
 	return 0;
 }
 
@@ -13894,7 +13916,7 @@
 	struct drm_i915_private *dev_priv;
 	int crtc_clock, cdclk;
 
-	if (!intel_crtc || !crtc_state)
+	if (!intel_crtc || !crtc_state->base.enable)
 		return DRM_PLANE_HELPER_NO_SCALING;
 
 	dev = intel_crtc->base.dev;
@@ -13943,32 +13965,6 @@
 					     &state->visible);
 }
 
-static void
-intel_commit_primary_plane(struct drm_plane *plane,
-			   struct intel_plane_state *state)
-{
-	struct drm_crtc *crtc = state->base.crtc;
-	struct drm_framebuffer *fb = state->base.fb;
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	crtc = crtc ? crtc : plane->crtc;
-
-	dev_priv->display.update_primary_plane(crtc, fb,
-					       state->src.x1 >> 16,
-					       state->src.y1 >> 16);
-}
-
-static void
-intel_disable_primary_plane(struct drm_plane *plane,
-			    struct drm_crtc *crtc)
-{
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
-}
-
 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 				    struct drm_crtc_state *old_crtc_state)
 {
@@ -14053,20 +14049,33 @@
 	primary->plane = pipe;
 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
 	primary->check_plane = intel_check_primary_plane;
-	primary->commit_plane = intel_commit_primary_plane;
-	primary->disable_plane = intel_disable_primary_plane;
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
 		primary->plane = !pipe;
 
 	if (INTEL_INFO(dev)->gen >= 9) {
 		intel_primary_formats = skl_primary_formats;
 		num_formats = ARRAY_SIZE(skl_primary_formats);
+
+		primary->update_plane = skylake_update_primary_plane;
+		primary->disable_plane = skylake_disable_primary_plane;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		intel_primary_formats = i965_primary_formats;
+		num_formats = ARRAY_SIZE(i965_primary_formats);
+
+		primary->update_plane = ironlake_update_primary_plane;
+		primary->disable_plane = i9xx_disable_primary_plane;
 	} else if (INTEL_INFO(dev)->gen >= 4) {
 		intel_primary_formats = i965_primary_formats;
 		num_formats = ARRAY_SIZE(i965_primary_formats);
+
+		primary->update_plane = i9xx_update_primary_plane;
+		primary->disable_plane = i9xx_disable_primary_plane;
 	} else {
 		intel_primary_formats = i8xx_primary_formats;
 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
+
+		primary->update_plane = i9xx_update_primary_plane;
+		primary->disable_plane = i9xx_disable_primary_plane;
 	}
 
 	drm_universal_plane_init(dev, &primary->base, 0,
@@ -14165,22 +14174,23 @@
 intel_disable_cursor_plane(struct drm_plane *plane,
 			   struct drm_crtc *crtc)
 {
-	intel_crtc_update_cursor(crtc, false);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	intel_crtc->cursor_addr = 0;
+	intel_crtc_update_cursor(crtc, NULL);
 }
 
 static void
-intel_commit_cursor_plane(struct drm_plane *plane,
-			  struct intel_plane_state *state)
+intel_update_cursor_plane(struct drm_plane *plane,
+			  const struct intel_crtc_state *crtc_state,
+			  const struct intel_plane_state *state)
 {
-	struct drm_crtc *crtc = state->base.crtc;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = plane->dev;
-	struct intel_crtc *intel_crtc;
 	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
 	uint32_t addr;
 
-	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
-
 	if (!obj)
 		addr = 0;
 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -14189,9 +14199,7 @@
 		addr = obj->phys_handle->busaddr;
 
 	intel_crtc->cursor_addr = addr;
-
-	if (crtc->state->active)
-		intel_crtc_update_cursor(crtc, state->visible);
+	intel_crtc_update_cursor(crtc, state);
 }
 
 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -14217,7 +14225,7 @@
 	cursor->plane = pipe;
 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
 	cursor->check_plane = intel_check_cursor_plane;
-	cursor->commit_plane = intel_commit_cursor_plane;
+	cursor->update_plane = intel_update_cursor_plane;
 	cursor->disable_plane = intel_disable_cursor_plane;
 
 	drm_universal_plane_init(dev, &cursor->base, 0,
@@ -14664,10 +14672,12 @@
 	u32 gen = INTEL_INFO(dev)->gen;
 
 	if (gen >= 9) {
+		int cpp = drm_format_plane_cpp(pixel_format, 0);
+
 		/* "The stride in bytes must not exceed the size of 8K
 		 *  pixels and 32K bytes."
 		 */
-		 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
+		return min(8192 * cpp, 32768);
 	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
 		return 32*1024;
 	} else if (gen >= 4) {
@@ -14691,6 +14701,7 @@
 				  struct drm_mode_fb_cmd2 *mode_cmd,
 				  struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned int aligned_height;
 	int ret;
 	u32 pitch_limit, stride_alignment;
@@ -14732,7 +14743,8 @@
 		return -EINVAL;
 	}
 
-	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+	stride_alignment = intel_fb_stride_alignment(dev_priv,
+						     mode_cmd->modifier[0],
 						     mode_cmd->pixel_format);
 	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
 		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
@@ -14824,7 +14836,6 @@
 
 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
 	intel_fb->obj = obj;
-	intel_fb->obj->framebuffer_references++;
 
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
@@ -14832,6 +14843,8 @@
 		return ret;
 	}
 
+	intel_fb->obj->framebuffer_references++;
+
 	return 0;
 }
 
@@ -14895,8 +14908,6 @@
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.update_primary_plane =
-			skylake_update_primary_plane;
 	} else if (HAS_DDI(dev)) {
 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
@@ -14905,8 +14916,6 @@
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.update_primary_plane =
-			ironlake_update_primary_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
@@ -14915,8 +14924,6 @@
 			ironlake_crtc_compute_clock;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
-		dev_priv->display.update_primary_plane =
-			ironlake_update_primary_plane;
 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
@@ -14924,8 +14931,6 @@
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-		dev_priv->display.update_primary_plane =
-			i9xx_update_primary_plane;
 	} else {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
 		dev_priv->display.get_initial_plane_config =
@@ -14933,8 +14938,6 @@
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-		dev_priv->display.update_primary_plane =
-			i9xx_update_primary_plane;
 	}
 
 	/* Returns the core display clock speed */
@@ -15240,12 +15243,89 @@
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	intel_update_cdclk(dev);
-	intel_prepare_ddi(dev);
+
+	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
+
 	intel_init_clock_gating(dev);
 	intel_enable_gt_powersave(dev);
 }
 
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WMs should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+static void sanitize_watermarks(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_atomic_state *state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *cstate;
+	struct drm_modeset_acquire_ctx ctx;
+	int ret;
+	int i;
+
+	/* Only supported on platforms that use atomic watermark design */
+	if (!dev_priv->display.program_watermarks)
+		return;
+
+	/*
+	 * We need to hold connection_mutex before calling duplicate_state so
+	 * that the connector loop is protected.
+	 */
+	drm_modeset_acquire_init(&ctx, 0);
+retry:
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	} else if (WARN_ON(ret)) {
+		goto fail;
+	}
+
+	state = drm_atomic_helper_duplicate_state(dev, &ctx);
+	if (WARN_ON(IS_ERR(state)))
+		goto fail;
+
+	ret = intel_atomic_check(dev, state);
+	if (ret) {
+		/*
+		 * If we fail here, it means that the hardware appears to be
+		 * programmed in a way that shouldn't be possible, given our
+		 * understanding of watermark requirements.  This might mean a
+		 * mistake in the hardware readout code or a mistake in the
+		 * watermark calculations for a given platform.  Raise a WARN
+		 * so that this is noticeable.
+		 *
+		 * If this actually happens, we'll have to just leave the
+		 * BIOS-programmed watermarks untouched and hope for the best.
+		 */
+		WARN(true, "Could not determine valid watermarks for inherited state\n");
+		goto fail;
+	}
+
+	/* Write calculated watermark values back */
+	to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
+	for_each_crtc_in_state(state, crtc, cstate, i) {
+		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
+
+		dev_priv->display.program_watermarks(cs);
+	}
+
+	drm_atomic_state_free(state);
+fail:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -15366,6 +15446,13 @@
 		 */
 		intel_find_initial_plane_obj(crtc, &plane_config);
 	}
+
+	/*
+	 * Make sure hardware watermarks really match the state we read out.
+	 * Note that we need to do this after reconstructing the BIOS fb's
+	 * since the watermark calculation done here will use pstate->fb.
+	 */
+	sanitize_watermarks(dev);
 }
 
 static void intel_enable_pipe_a(struct drm_device *dev)
@@ -15422,6 +15509,17 @@
 	return false;
 }
 
+static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_connector *connector;
+
+	for_each_connector_on_encoder(dev, &encoder->base, connector)
+		return true;
+
+	return false;
+}
+
 static void intel_sanitize_crtc(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -15496,6 +15594,7 @@
 		crtc->base.state->active = crtc->active;
 		crtc->base.enabled = crtc->active;
 		crtc->base.state->connector_mask = 0;
+		crtc->base.state->encoder_mask = 0;
 
 		/* Because we only establish the connector -> encoder ->
 		 * crtc links if something is active, this means the
@@ -15531,7 +15630,6 @@
 {
 	struct intel_connector *connector;
 	struct drm_device *dev = encoder->base.dev;
-	bool active = false;
 
 	/* We need to check both for a crtc link (meaning that the
 	 * encoder is active and trying to read from a pipe) and the
@@ -15539,15 +15637,7 @@
 	bool has_active_crtc = encoder->base.crtc &&
 		to_intel_crtc(encoder->base.crtc)->active;
 
-	for_each_intel_connector(dev, connector) {
-		if (connector->base.encoder != &encoder->base)
-			continue;
-
-		active = true;
-		break;
-	}
-
-	if (active && !has_active_crtc) {
+	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
@@ -15640,16 +15730,40 @@
 	struct intel_connector *connector;
 	int i;
 
+	dev_priv->active_crtcs = 0;
+
 	for_each_intel_crtc(dev, crtc) {
-		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
-		memset(crtc->config, 0, sizeof(*crtc->config));
-		crtc->config->base.crtc = &crtc->base;
+		struct intel_crtc_state *crtc_state = crtc->config;
+		int pixclk = 0;
 
-		crtc->active = dev_priv->display.get_pipe_config(crtc,
-								 crtc->config);
+		__drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
+		memset(crtc_state, 0, sizeof(*crtc_state));
+		crtc_state->base.crtc = &crtc->base;
 
-		crtc->base.state->active = crtc->active;
-		crtc->base.enabled = crtc->active;
+		crtc_state->base.active = crtc_state->base.enable =
+			dev_priv->display.get_pipe_config(crtc, crtc_state);
+
+		crtc->base.enabled = crtc_state->base.enable;
+		crtc->active = crtc_state->base.active;
+
+		if (crtc_state->base.active) {
+			dev_priv->active_crtcs |= 1 << crtc->pipe;
+
+			if (IS_BROADWELL(dev_priv)) {
+				pixclk = ilk_pipe_pixel_rate(crtc_state);
+
+				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+				if (crtc_state->ips_enabled)
+					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+			} else if (IS_VALLEYVIEW(dev_priv) ||
+				   IS_CHERRYVIEW(dev_priv) ||
+				   IS_BROXTON(dev_priv))
+				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
+			else
+				WARN_ON(dev_priv->display.modeset_calc_cdclk);
+		}
+
+		dev_priv->min_pixclk[crtc->pipe] = pixclk;
 
 		readout_plane_state(crtc);
 
@@ -15713,6 +15827,8 @@
 				 */
 				encoder->base.crtc->state->connector_mask |=
 					1 << drm_connector_index(&connector->base);
+				encoder->base.crtc->state->encoder_mask |=
+					1 << drm_encoder_index(&encoder->base);
 			}
 
 		} else {
@@ -15809,63 +15925,76 @@
 	for_each_intel_crtc(dev, crtc) {
 		unsigned long put_domains;
 
-		put_domains = modeset_get_crtc_power_domains(&crtc->base);
+		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
 		if (WARN_ON(put_domains))
 			modeset_put_power_domains(dev_priv, put_domains);
 	}
 	intel_display_set_init_power(dev_priv, false);
+
+	intel_fbc_init_pipe_state(dev_priv);
 }
 
 void intel_display_resume(struct drm_device *dev)
 {
-	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
-	struct intel_connector *conn;
-	struct intel_plane *plane;
-	struct drm_crtc *crtc;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+	struct drm_modeset_acquire_ctx ctx;
 	int ret;
+	bool setup = false;
 
-	if (!state)
-		return;
+	dev_priv->modeset_restore_state = NULL;
 
-	state->acquire_ctx = dev->mode_config.acquire_ctx;
+	/*
+	 * This is a kludge because with real atomic modeset mode_config.mutex
+	 * won't be taken. Unfortunately some probed state like
+	 * audio_codec_enable is still protected by mode_config.mutex, so lock
+	 * it here for now.
+	 */
+	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_acquire_init(&ctx, 0);
 
-	/* preserve complete old state, including dpll */
-	intel_atomic_get_shared_dpll_state(state);
+retry:
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);
 
-	for_each_crtc(dev, crtc) {
-		struct drm_crtc_state *crtc_state =
-			drm_atomic_get_crtc_state(state, crtc);
+	if (ret == 0 && !setup) {
+		setup = true;
 
-		ret = PTR_ERR_OR_ZERO(crtc_state);
-		if (ret)
-			goto err;
-
-		/* force a restore */
-		crtc_state->mode_changed = true;
+		intel_modeset_setup_hw_state(dev);
+		i915_redisable_vga(dev);
 	}
 
-	for_each_intel_plane(dev, plane) {
-		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
-		if (ret)
-			goto err;
+	if (ret == 0 && state) {
+		struct drm_crtc_state *crtc_state;
+		struct drm_crtc *crtc;
+		int i;
+
+		state->acquire_ctx = &ctx;
+
+		for_each_crtc_in_state(state, crtc, crtc_state, i) {
+			/*
+			 * Force recalculation even if we restore
+			 * current state. With fast modeset this may not result
+			 * in a modeset when the state is compatible.
+			 */
+			crtc_state->mode_changed = true;
+		}
+
+		ret = drm_atomic_commit(state);
 	}
 
-	for_each_intel_connector(dev, conn) {
-		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
-		if (ret)
-			goto err;
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
 	}
 
-	intel_modeset_setup_hw_state(dev);
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+	mutex_unlock(&dev->mode_config.mutex);
 
-	i915_redisable_vga(dev);
-	ret = drm_atomic_commit(state);
-	if (!ret)
-		return;
-
-err:
-	DRM_ERROR("Restoring old state failed with %i\n", ret);
-	drm_atomic_state_free(state);
+	if (ret) {
+		DRM_ERROR("Restoring old state failed with %i\n", ret);
+		drm_atomic_state_free(state);
+	}
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -15874,9 +16003,7 @@
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
 	intel_init_gt_powersave(dev);
-	mutex_unlock(&dev->struct_mutex);
 
 	intel_modeset_init_hw(dev);
 
@@ -15943,7 +16070,7 @@
 
 	intel_unregister_dsm_handler();
 
-	intel_fbc_disable(dev_priv);
+	intel_fbc_global_disable(dev_priv);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
@@ -15956,9 +16083,7 @@
 
 	intel_cleanup_overlay(dev);
 
-	mutex_lock(&dev->struct_mutex);
 	intel_cleanup_gt_powersave(dev);
-	mutex_unlock(&dev->struct_mutex);
 
 	intel_teardown_gmbus(dev);
 }
@@ -16153,7 +16278,7 @@
 	for_each_pipe(dev_priv, i) {
 		err_printf(m, "Pipe [%d]:\n", i);
 		err_printf(m, "  Power: %s\n",
-			   error->pipe[i].power_domain_on ? "on" : "off");
+			   onoff(error->pipe[i].power_domain_on));
 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
 		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
 
@@ -16181,7 +16306,7 @@
 		err_printf(m, "CPU transcoder: %c\n",
 			   transcoder_name(error->transcoder[i].cpu_transcoder));
 		err_printf(m, "  Power: %s\n",
-			   error->transcoder[i].power_domain_on ? "on" : "off");
+			   onoff(error->transcoder[i].power_domain_on));
 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
@@ -16191,24 +16316,3 @@
 		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
 	}
 }
-
-void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct intel_crtc *crtc;
-
-	for_each_intel_crtc(dev, crtc) {
-		struct intel_unpin_work *work;
-
-		spin_lock_irq(&dev->event_lock);
-
-		work = crtc->unpin_work;
-
-		if (work && work->event &&
-		    work->event->base.file_priv == file) {
-			kfree(work->event);
-			work->event = NULL;
-		}
-
-		spin_unlock_irq(&dev->event_lock);
-	}
-}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cdc2c15..f069a82 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -157,14 +157,9 @@
 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
 	u8 source_max, sink_max;
 
-	source_max = 4;
-	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
-	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
-		source_max = 2;
-
+	source_max = intel_dig_port->max_lanes;
 	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 
 	return min(source_max, sink_max);
@@ -208,6 +203,7 @@
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	int target_clock = mode->clock;
 	int max_rate, mode_rate, max_lanes, max_link_clock;
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
 	if (is_edp(intel_dp) && fixed_mode) {
 		if (mode->hdisplay > fixed_mode->hdisplay)
@@ -225,7 +221,7 @@
 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 	mode_rate = intel_dp_link_required(target_clock, 18);
 
-	if (mode_rate > max_rate)
+	if (mode_rate > max_rate || target_clock > max_dotclk)
 		return MODE_CLOCK_HIGH;
 
 	if (mode->clock < 10000)
@@ -340,8 +336,12 @@
 		release_cl_override = IS_CHERRYVIEW(dev) &&
 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
 
-		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
-				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
+			DRM_ERROR("Failed to force on pll for pipe %c!\n",
+				  pipe_name(pipe));
+			return;
+		}
 	}
 
 	/*
@@ -980,7 +980,10 @@
 		if (WARN_ON(txsize > 20))
 			return -E2BIG;
 
-		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+		if (msg->buffer)
+			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+		else
+			WARN_ON(msg->size);
 
 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
 		if (ret > 0) {
@@ -1189,7 +1192,6 @@
 static int
 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
 	int ret;
@@ -1200,7 +1202,7 @@
 	if (!intel_dp->aux.name)
 		return -ENOMEM;
 
-	intel_dp->aux.dev = dev->dev;
+	intel_dp->aux.dev = connector->base.kdev;
 	intel_dp->aux.transfer = intel_dp_aux_transfer;
 
 	DRM_DEBUG_KMS("registering %s bus for %s\n",
@@ -1215,16 +1217,6 @@
 		return ret;
 	}
 
-	ret = sysfs_create_link(&connector->base.kdev->kobj,
-				&intel_dp->aux.ddc.dev.kobj,
-				intel_dp->aux.ddc.dev.kobj.name);
-	if (ret < 0) {
-		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
-			  intel_dp->aux.name, ret);
-		intel_dp_aux_fini(intel_dp);
-		return ret;
-	}
-
 	return 0;
 }
 
@@ -1233,9 +1225,7 @@
 {
 	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
 
-	if (!intel_connector->mst_port)
-		sysfs_remove_link(&intel_connector->base.kdev->kobj,
-				  intel_dp->aux.ddc.dev.kobj.name);
+	intel_dp_aux_fini(intel_dp);
 	intel_connector_unregister(intel_connector);
 }
 
@@ -1812,12 +1802,21 @@
 
 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
 {
+	ktime_t panel_power_on_time;
+	s64 panel_power_off_duration;
+
 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
 
+	/* Take the difference of the current time and the panel power-off
+	 * time, then make the panel wait for t11_t12 if needed. */
+	panel_power_on_time = ktime_get_boottime();
+	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
+
 	/* When we disable the VDD override bit last, we have to do the manual
 	 * wait. */
-	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
-				       intel_dp->panel_power_cycle_delay);
+	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
+		wait_remaining_ms_from_jiffies(jiffies,
+				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);
 
 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 }
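
wait_panel_power_cycle() now subtracts the time the panel has already spent powered off (tracked with ktime_get_boottime() below) from the required t11_t12 cycle delay, instead of always waiting the full delay from the last recorded jiffies. The arithmetic, modeled in plain C with millisecond values (all numbers invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Sleep only for the part of the power-cycle delay that hasn't elapsed. */
    static int64_t remaining_wait_ms(int64_t now_ms, int64_t power_off_ms,
                                     int64_t cycle_delay_ms)
    {
        int64_t elapsed = now_ms - power_off_ms;  /* ktime_ms_delta() stand-in */

        return elapsed < cycle_delay_ms ? cycle_delay_ms - elapsed : 0;
    }

    int main(void)
    {
        /* panel went off 300 ms ago and t11_t12 is 500 ms: wait 200 ms more */
        printf("%lld ms\n", (long long)remaining_wait_ms(1300, 1000, 500));
        return 0;
    }
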
@@ -1969,7 +1968,7 @@
 	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
 
 	if ((pp & POWER_TARGET_ON) == 0)
-		intel_dp->last_power_cycle = jiffies;
+		intel_dp->panel_power_off_time = ktime_get_boottime();
 
 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_put(dev_priv, power_domain);
@@ -2118,7 +2117,7 @@
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
-	intel_dp->last_power_cycle = jiffies;
+	intel_dp->panel_power_off_time = ktime_get_boottime();
 	wait_panel_off(intel_dp);
 
 	/* We got a reference when we enabled the VDD. */
@@ -2243,11 +2242,6 @@
 		_intel_edp_backlight_off(intel_dp);
 }
 
-static const char *state_string(bool enabled)
-{
-	return enabled ? "on" : "off";
-}
-
 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -2257,7 +2251,7 @@
 	I915_STATE_WARN(cur_state != state,
 			"DP port %c state assertion failure (expected %s, current %s)\n",
 			port_name(dig_port->port),
-			state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
 
@@ -2267,7 +2261,7 @@
 
 	I915_STATE_WARN(cur_state != state,
 			"eDP PLL state assertion failure (expected %s, current %s)\n",
-			state_string(state), state_string(cur_state));
+			onoff(state), onoff(cur_state));
 }
 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
@@ -4024,7 +4018,7 @@
 	} while (--attempts && count);
 
 	if (attempts == 0) {
-		DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
+		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
 		ret = -ETIMEDOUT;
 	}
 
@@ -4564,7 +4558,7 @@
 {
 	if (HAS_PCH_IBX(dev_priv))
 		return ibx_digital_port_connected(dev_priv, port);
-	if (HAS_PCH_SPLIT(dev_priv))
+	else if (HAS_PCH_SPLIT(dev_priv))
 		return cpt_digital_port_connected(dev_priv, port);
 	else if (IS_BROXTON(dev_priv))
 		return bxt_digital_port_connected(dev_priv, port);
@@ -4884,7 +4878,6 @@
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
-	intel_dp_aux_fini(intel_dp);
 	intel_dp_mst_encoder_cleanup(intel_dig_port);
 	if (is_edp(intel_dp)) {
 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -5132,7 +5125,7 @@
 
 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 {
-	intel_dp->last_power_cycle = jiffies;
+	intel_dp->panel_power_off_time = ktime_get_boottime();
 	intel_dp->last_power_on = jiffies;
 	intel_dp->last_backlight_off = jiffies;
 }
@@ -5849,6 +5842,11 @@
 	enum port port = intel_dig_port->port;
 	int type, ret;
 
+	if (WARN(intel_dig_port->max_lanes < 1,
+		 "Not enough lanes (%d) for DP on port %c\n",
+		 intel_dig_port->max_lanes, port_name(port)))
+		return false;
+
 	intel_dp->pps_pipe = INVALID_PIPE;
 
 	/* intel_dp vfuncs */
@@ -6046,6 +6044,7 @@
 
 	intel_dig_port->port = port;
 	intel_dig_port->dp.output_reg = output_reg;
+	intel_dig_port->max_lanes = 4;
 
 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
 	if (IS_CHERRYVIEW(dev)) {
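/*
 * max_lanes is seeded to 4 (the DP maximum) at encoder init; the WARN
 * added to the connector init path above then turns a port that ends up
 * with no usable lanes into a clean probe failure instead of a crash
 * later during link training.
 */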
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index fa0dabf..a2bd698 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -184,7 +184,9 @@
 	intel_mst->port = found->port;
 
 	if (intel_dp->active_mst_links == 0) {
-		intel_ddi_clk_select(encoder, intel_crtc->config);
+		intel_prepare_ddi_buffer(&intel_dig_port->base);
+
+		intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
 
 		intel_dp_set_link_params(intel_dp, intel_crtc->config);
 
@@ -369,6 +371,8 @@
 intel_dp_mst_mode_valid(struct drm_connector *connector,
 			struct drm_display_mode *mode)
 {
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
 	/* TODO - validate mode against available PBN for link */
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
@@ -376,6 +380,9 @@
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		return MODE_H_ILLEGAL;
 
+	if (mode->clock > max_dotclk)
+		return MODE_CLOCK_HIGH;
+
 	return MODE_OK;
 }
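/*
 * Order of the checks in intel_dp_mst_mode_valid() above, in sketch
 * form (return values are the standard enum drm_mode_status codes):
 *
 *	mode->clock < 10000			-> MODE_CLOCK_LOW
 *	mode->flags & DRM_MODE_FLAG_DBLCLK	-> MODE_H_ILLEGAL
 *	mode->clock > max_dotclk		-> MODE_CLOCK_HIGH
 *	otherwise				-> MODE_OK
 *
 * The new max_dotclk clamp filters out modes the source could never
 * drive, even before the per-link PBN validation (still a TODO) exists.
 */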
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index df7f3cb..4c027d6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -246,7 +246,18 @@
 	struct drm_atomic_state base;
 
 	unsigned int cdclk;
-	bool dpll_set;
+
+	/*
+	 * Calculated device cdclk, can be different from cdclk
+	 * only when all crtc's are DPMS off.
+	 */
+	unsigned int dev_cdclk;
+
+	bool dpll_set, modeset;
+
+	unsigned int active_crtcs;
+	unsigned int min_pixclk[I915_MAX_PIPES];
+
 	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
 	struct intel_wm_config wm_config;
 };
@@ -368,6 +379,7 @@
 	bool update_pipe; /* can a fast modeset be performed? */
 	bool disable_cxsr;
 	bool wm_changed; /* watermarks are updated */
+	bool fb_changed; /* fb on any of the planes is changed */
 
 	/* Pipe source size (ie. panel fitter input size)
 	 * All planes will be positioned inside this space,
@@ -481,6 +493,8 @@
 
 	bool ips_enabled;
 
+	bool enable_fbc;
+
 	bool double_wide;
 
 	bool dp_encoder_is_mst;
@@ -531,16 +545,13 @@
  */
 struct intel_crtc_atomic_commit {
 	/* Sleepable operations to perform before commit */
-	bool disable_fbc;
-	bool disable_ips;
-	bool pre_disable_primary;
 
 	/* Sleepable operations to perform after commit */
 	unsigned fb_bits;
-	bool wait_vblank;
-	bool update_fbc;
 	bool post_enable_primary;
-	unsigned update_sprite_watermarks;
+
+	/* Sleepable operations to perform before and after commit */
+	bool update_fbc;
 };
 
 struct intel_crtc {
@@ -564,7 +575,7 @@
 	/* Display surface base address adjustment for pageflips. Note that on
 	 * gen4+ this only adjusts up to a tile, offsets within a tile are
 	 * handled in the hw itself (with the TILEOFF register). */
-	unsigned long dspaddr_offset;
+	u32 dspaddr_offset;
 	int adjusted_x;
 	int adjusted_y;
 
@@ -647,23 +658,17 @@
 	/*
 	 * NOTE: Do not place new plane state fields here (e.g., when adding
 	 * new plane properties).  New runtime state should now be placed in
-	 * the intel_plane_state structure and accessed via drm_plane->state.
+	 * the intel_plane_state structure and accessed via plane_state.
 	 */
 
 	void (*update_plane)(struct drm_plane *plane,
-			     struct drm_crtc *crtc,
-			     struct drm_framebuffer *fb,
-			     int crtc_x, int crtc_y,
-			     unsigned int crtc_w, unsigned int crtc_h,
-			     uint32_t x, uint32_t y,
-			     uint32_t src_w, uint32_t src_h);
+			     const struct intel_crtc_state *crtc_state,
+			     const struct intel_plane_state *plane_state);
 	void (*disable_plane)(struct drm_plane *plane,
 			      struct drm_crtc *crtc);
 	int (*check_plane)(struct drm_plane *plane,
 			   struct intel_crtc_state *crtc_state,
 			   struct intel_plane_state *state);
-	void (*commit_plane)(struct drm_plane *plane,
-			     struct intel_plane_state *state);
 };
 
 struct intel_watermark_params {
@@ -765,9 +770,9 @@
 	int backlight_off_delay;
 	struct delayed_work panel_vdd_work;
 	bool want_panel_vdd;
-	unsigned long last_power_cycle;
 	unsigned long last_power_on;
 	unsigned long last_backlight_off;
+	ktime_t panel_power_off_time;
 
 	struct notifier_block edp_notifier;
 
@@ -817,6 +822,7 @@
 	struct intel_hdmi hdmi;
 	enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
 	bool release_cl2_override;
+	uint8_t max_lanes;
 	/* for communication with audio component; protected by av_mutex */
 	const struct drm_connector *audio_connector;
 };
@@ -903,9 +909,7 @@
 };
 
 struct intel_load_detect_pipe {
-	struct drm_framebuffer *release_fb;
-	bool load_detect_temp;
-	int dpms_mode;
+	struct drm_atomic_state *restore_state;
 };
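/*
 * The single restore_state pointer above replaces the hand-rolled
 * fb/dpms bookkeeping: load detection can snapshot the full atomic
 * state before taking over a pipe and simply replay that snapshot to
 * undo it, so no per-field teardown tracking is needed anymore.
 */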
 
 static inline struct intel_encoder *
@@ -988,6 +992,8 @@
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 				     unsigned int pipe_mask);
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+				     unsigned int pipe_mask);
 
 /* intel_crt.c */
 void intel_crt_init(struct drm_device *dev);
@@ -996,7 +1002,7 @@
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
 			  const struct intel_crtc_state *pipe_config);
-void intel_prepare_ddi(struct drm_device *dev);
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
 void hsw_fdi_link_train(struct drm_crtc *crtc);
 void intel_ddi_init(struct drm_device *dev, enum port port);
 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
@@ -1041,8 +1047,8 @@
 				   uint64_t fb_format_modifier);
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
 			enum fb_op_origin origin);
-u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
-			      uint32_t pixel_format);
+u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
+			      uint64_t fb_modifier, uint32_t pixel_format);
 
 /* intel_audio.c */
 void intel_init_audio(struct drm_device *dev);
@@ -1126,9 +1132,8 @@
 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 				    struct drm_plane_state *plane_state);
 
-unsigned int
-intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
-		  uint64_t fb_format_modifier, unsigned int plane);
+unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
+			       uint64_t fb_modifier, unsigned int cpp);
 
 static inline bool
 intel_rotation_90_or_270(unsigned int rotation)
@@ -1149,8 +1154,8 @@
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 						struct intel_crtc_state *state);
 
-void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
-		      const struct dpll *dpll);
+int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+		     const struct dpll *dpll);
 void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
 
 /* modesetting asserts */
@@ -1167,11 +1172,11 @@
 void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
-					     int *x, int *y,
-					     unsigned int tiling_mode,
-					     unsigned int bpp,
-					     unsigned int pitch);
+u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
+			      int *x, int *y,
+			      uint64_t fb_modifier,
+			      unsigned int cpp,
+			      unsigned int pitch);
 void intel_prepare_reset(struct drm_device *dev);
 void intel_finish_reset(struct drm_device *dev);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1207,7 +1212,6 @@
 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
-void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
@@ -1222,7 +1226,7 @@
 
 /* intel_csr.c */
 void intel_csr_ucode_init(struct drm_i915_private *);
-void intel_csr_load_program(struct drm_i915_private *);
+bool intel_csr_load_program(struct drm_i915_private *);
 void intel_csr_ucode_fini(struct drm_i915_private *);
 
 /* intel_dp.c */
@@ -1323,13 +1327,16 @@
 #endif
 
 /* intel_fbc.c */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+			   struct drm_atomic_state *state);
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_deactivate(struct intel_crtc *crtc);
-void intel_fbc_update(struct intel_crtc *crtc);
+void intel_fbc_pre_update(struct intel_crtc *crtc);
+void intel_fbc_post_update(struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
 void intel_fbc_enable(struct intel_crtc *crtc);
-void intel_fbc_disable(struct drm_i915_private *dev_priv);
-void intel_fbc_disable_crtc(struct intel_crtc *crtc);
+void intel_fbc_disable(struct intel_crtc *crtc);
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin);
@@ -1558,6 +1565,7 @@
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
+int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 0193c62a..01b8e9f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -478,8 +478,8 @@
 
 	DRM_DEBUG_KMS("\n");
 
-	intel_dsi_prepare(encoder);
 	intel_enable_dsi_pll(encoder);
+	intel_dsi_prepare(encoder);
 
 	/* Panel Enable over CRC PMIC */
 	if (intel_dsi->gpio_panel)
@@ -634,7 +634,6 @@
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	u32 val;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -642,9 +641,13 @@
 
 	intel_dsi_clear_device_ready(encoder);
 
-	val = I915_READ(DSPCLK_GATE_D);
-	val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
-	I915_WRITE(DSPCLK_GATE_D, val);
+	if (!IS_BROXTON(dev_priv)) {
+		u32 val;
+
+		val = I915_READ(DSPCLK_GATE_D);
+		val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
+		I915_WRITE(DSPCLK_GATE_D, val);
+	}
 
 	drm_panel_unprepare(intel_dsi->panel);
 
@@ -709,7 +712,7 @@
 static void intel_dsi_get_config(struct intel_encoder *encoder,
 				 struct intel_crtc_state *pipe_config)
 {
-	u32 pclk = 0;
+	u32 pclk;
 	DRM_DEBUG_KMS("\n");
 
 	pipe_config->has_dsi_encoder = true;
@@ -720,12 +723,7 @@
 	 */
 	pipe_config->dpll_hw_state.dpll_md = 0;
 
-	if (IS_BROXTON(encoder->base.dev))
-		pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
-	else if (IS_VALLEYVIEW(encoder->base.dev) ||
-		 IS_CHERRYVIEW(encoder->base.dev))
-		pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
-
+	pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
 	if (!pclk)
 		return;
 
@@ -787,10 +785,9 @@
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
-	unsigned int bpp = intel_crtc->config->pipe_bpp;
+	unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
 	unsigned int lane_count = intel_dsi->lane_count;
 
 	u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -861,7 +858,7 @@
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
 	enum port port;
-	unsigned int bpp = intel_crtc->config->pipe_bpp;
+	unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
 	u32 val, tmp;
 	u16 mode_hdisplay;
 
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 02551ff..92f3922 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -34,6 +34,8 @@
 #define DSI_DUAL_LINK_FRONT_BACK	1
 #define DSI_DUAL_LINK_PIXEL_ALT		2
 
+int dsi_pixel_format_bpp(int pixel_format);
+
 struct intel_dsi_host;
 
 struct intel_dsi {
@@ -126,8 +128,7 @@
 
 extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
 extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
-extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
+extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
 extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
 							enum port port);
 
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e8113ad..7f145b4 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -234,28 +234,33 @@
 	if (!gtable[gpio].init) {
 		/* program the function */
 		/* FIXME: remove constant below */
-		vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
+		vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
+				  0x2000CC00);
 		gtable[gpio].init = 1;
 	}
 
 	val = 0x4 | action;
 
 	/* pull up/down */
-	vlv_gpio_nc_write(dev_priv, pad, val);
+	vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
 	mutex_unlock(&dev_priv->sb_lock);
 
 out:
 	return data;
 }
 
+static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+{
+	return data + *(data + 6) + 7;
+}
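/*
 * The skip arithmetic above follows from the i2c element layout: seven
 * fixed header bytes, with the payload length stored at byte offset 6.
 * The element is stepped over rather than executed, which keeps
 * sequences containing i2c writes walkable by the parser below.
 */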
+
 typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
 					const u8 *data);
 static const fn_mipi_elem_exec exec_elem[] = {
-	NULL, /* reserved */
-	mipi_exec_send_packet,
-	mipi_exec_delay,
-	mipi_exec_gpio,
-	NULL, /* status read; later */
+	[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
+	[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
+	[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
+	[MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
 };
 
 /*
@@ -265,107 +270,114 @@
  */
 
 static const char * const seq_name[] = {
-	"UNDEFINED",
-	"MIPI_SEQ_ASSERT_RESET",
-	"MIPI_SEQ_INIT_OTP",
-	"MIPI_SEQ_DISPLAY_ON",
-	"MIPI_SEQ_DISPLAY_OFF",
-	"MIPI_SEQ_DEASSERT_RESET"
+	[MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+	[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
+	[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
+	[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
+	[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+	[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
+	[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
+	[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
+	[MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF",
+	[MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON",
+	[MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF",
 };
 
-static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
+static const char *sequence_name(enum mipi_seq seq_id)
 {
-	fn_mipi_elem_exec mipi_elem_exec;
-	int index;
+	if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+		return seq_name[seq_id];
+	else
+		return "(unknown)";
+}
 
-	if (!data)
+static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
+{
+	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+	struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+	const u8 *data;
+	fn_mipi_elem_exec mipi_elem_exec;
+
+	if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
 		return;
 
-	DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
+	data = dev_priv->vbt.dsi.sequence[seq_id];
+	if (!data) {
+		DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
+			      seq_id, sequence_name(seq_id));
+		return;
+	}
 
-	/* go to the first element of the sequence */
+	WARN_ON(*data != seq_id);
+
+	DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
+		      seq_id, sequence_name(seq_id));
+
+	/* Skip Sequence Byte. */
 	data++;
 
-	/* parse each byte till we reach end of sequence byte - 0x00 */
+	/* Skip Size of Sequence. */
+	if (dev_priv->vbt.dsi.seq_version >= 3)
+		data += 4;
+
 	while (1) {
-		index = *data;
-		mipi_elem_exec = exec_elem[index];
-		if (!mipi_elem_exec) {
-			DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
+		u8 operation_byte = *data++;
+		u8 operation_size = 0;
+
+		if (operation_byte == MIPI_SEQ_ELEM_END)
+			break;
+
+		if (operation_byte < ARRAY_SIZE(exec_elem))
+			mipi_elem_exec = exec_elem[operation_byte];
+		else
+			mipi_elem_exec = NULL;
+
+		/* Size of Operation. */
+		if (dev_priv->vbt.dsi.seq_version >= 3)
+			operation_size = *data++;
+
+		if (mipi_elem_exec) {
+			data = mipi_elem_exec(intel_dsi, data);
+		} else if (operation_size) {
+			/* We have size, skip. */
+			DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
+				      operation_byte);
+			data += operation_size;
+		} else {
+			/* No size, can't skip without parsing. */
+			DRM_ERROR("Unsupported MIPI operation byte %u\n",
+				  operation_byte);
 			return;
 		}
-
-		/* goto element payload */
-		data++;
-
-		/* execute the element specific rotines */
-		data = mipi_elem_exec(intel_dsi, data);
-
-		/*
-		 * After processing the element, data should point to
-		 * next element or end of sequence
-		 * check if have we reached end of sequence
-		 */
-		if (*data == 0x00)
-			break;
 	}
 }
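/*
 * Layout of a VBT MIPI sequence as the loop above consumes it (field
 * names are descriptive, not from the spec):
 *
 *	u8  sequence id
 *	u32 total size of the sequence		(seq_version >= 3 only)
 *	repeated elements:
 *		u8 operation byte		(MIPI_SEQ_ELEM_*)
 *		u8 size of the operation	(seq_version >= 3 only)
 *		payload bytes...
 *	u8 MIPI_SEQ_ELEM_END terminator
 *
 * Pre-v3 sequences carry no element sizes, so an unknown operation byte
 * cannot be skipped there and aborts the sequence, as the DRM_ERROR
 * branch does.
 */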
 
 static int vbt_panel_prepare(struct drm_panel *panel)
 {
-	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
-	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const u8 *sequence;
-
-	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
-	generic_exec_sequence(intel_dsi, sequence);
-
-	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
-	generic_exec_sequence(intel_dsi, sequence);
+	generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+	generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
 
 	return 0;
 }
 
 static int vbt_panel_unprepare(struct drm_panel *panel)
 {
-	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
-	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const u8 *sequence;
-
-	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
-	generic_exec_sequence(intel_dsi, sequence);
+	generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
 
 	return 0;
 }
 
 static int vbt_panel_enable(struct drm_panel *panel)
 {
-	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
-	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const u8 *sequence;
-
-	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
-	generic_exec_sequence(intel_dsi, sequence);
+	generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
 
 	return 0;
 }
 
 static int vbt_panel_disable(struct drm_panel *panel)
 {
-	struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-	struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
-	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	const u8 *sequence;
-
-	sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
-	generic_exec_sequence(intel_dsi, sequence);
+	generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
 
 	return 0;
 }
@@ -428,10 +440,7 @@
 	intel_dsi->dual_link = mipi_config->dual_link;
 	intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
 
-	if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
-		bits_per_pixel = 18;
-	else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
-		bits_per_pixel = 16;
+	bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);
 
 	intel_dsi->operation_mode = mipi_config->is_cmd_mode;
 	intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
@@ -685,6 +694,8 @@
 
 	/* This is cheating a bit with the cleanup. */
 	vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
+	if (!vbt_panel)
+		return NULL;
 
 	vbt_panel->intel_dsi = intel_dsi;
 	drm_panel_init(&vbt_panel->panel);
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index fbd2b51..70883c5 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -30,15 +30,7 @@
 #include "i915_drv.h"
 #include "intel_dsi.h"
 
-#define DSI_HSS_PACKET_SIZE		4
-#define DSI_HSE_PACKET_SIZE		4
-#define DSI_HSA_PACKET_EXTRA_SIZE	6
-#define DSI_HBP_PACKET_EXTRA_SIZE	6
-#define DSI_HACTIVE_PACKET_EXTRA_SIZE	6
-#define DSI_HFP_PACKET_EXTRA_SIZE	6
-#define DSI_EOTP_PACKET_SIZE		4
-
-static int dsi_pixel_format_bpp(int pixel_format)
+int dsi_pixel_format_bpp(int pixel_format)
 {
 	int bpp;
 
@@ -71,77 +63,6 @@
 	71, 35, 273, 136, 324, 418, 465, 488, 500, 506		/* 91 - 100 */
 };
 
-#ifdef DSI_CLK_FROM_RR
-
-static u32 dsi_rr_formula(const struct drm_display_mode *mode,
-			  int pixel_format, int video_mode_format,
-			  int lane_count, bool eotp)
-{
-	u32 bpp;
-	u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
-	u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
-	u32 bytes_per_line, bytes_per_frame;
-	u32 num_frames;
-	u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
-	u32 dsi_bit_clock_hz;
-	u32 dsi_clk;
-
-	bpp = dsi_pixel_format_bpp(pixel_format);
-
-	hactive = mode->hdisplay;
-	vactive = mode->vdisplay;
-	hfp = mode->hsync_start - mode->hdisplay;
-	hsync = mode->hsync_end - mode->hsync_start;
-	hbp = mode->htotal - mode->hsync_end;
-
-	vfp = mode->vsync_start - mode->vdisplay;
-	vsync = mode->vsync_end - mode->vsync_start;
-	vbp = mode->vtotal - mode->vsync_end;
-
-	hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
-	hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
-	hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
-	hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
-
-	bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
-		DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
-		hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
-		hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
-		hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
-
-	/*
-	 * XXX: Need to accurately calculate LP to HS transition timeout and add
-	 * it to bytes_per_line/bytes_per_frame.
-	 */
-
-	if (eotp && video_mode_format == VIDEO_MODE_BURST)
-		bytes_per_line += DSI_EOTP_PACKET_SIZE;
-
-	bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
-		vactive * bytes_per_line + vfp * bytes_per_line;
-
-	if (eotp &&
-	    (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
-	     video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
-		bytes_per_frame += DSI_EOTP_PACKET_SIZE;
-
-	num_frames = drm_mode_vrefresh(mode);
-	bytes_per_x_frames = num_frames * bytes_per_frame;
-
-	bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
-
-	/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
-	dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
-	dsi_clk = dsi_bit_clock_hz / 1000;
-
-	if (eotp && video_mode_format == VIDEO_MODE_BURST)
-		dsi_clk *= 2;
-
-	return dsi_clk;
-}
-
-#else
-
 /* Get DSI clock from pixel clock */
 static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
 {
@@ -155,8 +76,6 @@
 	return dsi_clk_khz;
 }
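/*
 * Worked example for dsi_clk_from_pclk() above, assuming the usual DSI
 * relation dsi_clk = pclk * bpp / lane_count (all clocks in kHz):
 * a 1920x1080@60 panel with pclk ~= 148500 kHz in RGB888 (24 bpp) over
 * 4 lanes needs 148500 * 24 / 4 = 891000 kHz, i.e. ~891 Mbps per lane.
 */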
 
-#endif
-
 static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
 			struct dsi_mnp *dsi_mnp, int target_dsi_clk)
 {
@@ -322,7 +241,7 @@
 	     bpp, pipe_bpp);
 }
 
-u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -384,7 +303,7 @@
 	return pclk;
 }
 
-u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
 {
 	u32 pclk;
 	u32 dsi_clk;
@@ -419,6 +338,14 @@
 	return pclk;
 }
 
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+	if (IS_BROXTON(encoder->base.dev))
+		return bxt_dsi_get_pclk(encoder, pipe_bpp);
+	else
+		return vlv_dsi_get_pclk(encoder, pipe_bpp);
+}
+
 static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
 	u32 temp;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index a1988a4..0f0492f 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -43,7 +43,7 @@
 
 static inline bool fbc_supported(struct drm_i915_private *dev_priv)
 {
-	return dev_priv->fbc.activate != NULL;
+	return HAS_FBC(dev_priv);
 }
 
 static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
@@ -56,6 +56,11 @@
 	return INTEL_INFO(dev_priv)->gen < 4;
 }
 
+static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
+{
+	return INTEL_INFO(dev_priv)->gen <= 3;
+}
+
 /*
  * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
  * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -74,19 +79,17 @@
  * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
  * we wrote to PIPESRC.
  */
-static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
 					    int *width, int *height)
 {
-	struct intel_plane_state *plane_state =
-			to_intel_plane_state(crtc->base.primary->state);
 	int w, h;
 
-	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
-		w = drm_rect_height(&plane_state->src) >> 16;
-		h = drm_rect_width(&plane_state->src) >> 16;
+	if (intel_rotation_90_or_270(cache->plane.rotation)) {
+		w = cache->plane.src_h;
+		h = cache->plane.src_w;
 	} else {
-		w = drm_rect_width(&plane_state->src) >> 16;
-		h = drm_rect_height(&plane_state->src) >> 16;
+		w = cache->plane.src_w;
+		h = cache->plane.src_h;
 	}
 
 	if (width)
@@ -95,26 +98,23 @@
 		*height = h;
 }
 
-static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
-					struct drm_framebuffer *fb)
+static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+					struct intel_fbc_state_cache *cache)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	int lines;
 
-	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+	intel_fbc_get_plane_source_size(cache, NULL, &lines);
 	if (INTEL_INFO(dev_priv)->gen >= 7)
 		lines = min(lines, 2048);
 
 	/* Hardware needs the full buffer stride, not just the active area. */
-	return lines * fb->pitches[0];
+	return lines * cache->fb.stride;
 }
 
 static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
 	u32 fbc_ctl;
 
-	dev_priv->fbc.active = false;
-
 	/* Disable compression */
 	fbc_ctl = I915_READ(FBC_CONTROL);
 	if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -130,21 +130,17 @@
 	}
 }
 
-static void i8xx_fbc_activate(struct intel_crtc *crtc)
+static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
 	int cfb_pitch;
 	int i;
 	u32 fbc_ctl;
 
-	dev_priv->fbc.active = true;
-
 	/* Note: fbc.threshold == 1 for i8xx */
-	cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
-	if (fb->pitches[0] < cfb_pitch)
-		cfb_pitch = fb->pitches[0];
+	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
+	if (params->fb.stride < cfb_pitch)
+		cfb_pitch = params->fb.stride;
 
 	/* FBC_CTL wants 32B or 64B units */
 	if (IS_GEN2(dev_priv))
@@ -161,9 +157,9 @@
 
 		/* Set it up... */
 		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
+		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
 		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-		I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
+		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
 	}
 
 	/* enable it... */
@@ -173,7 +169,7 @@
 	if (IS_I945GM(dev_priv))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-	fbc_ctl |= obj->fence_reg;
+	fbc_ctl |= params->fb.fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -182,23 +178,19 @@
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_activate(struct intel_crtc *crtc)
+static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.active = true;
-
-	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
-	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
+	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+	dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
 
-	I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
+	I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -208,8 +200,6 @@
 {
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.active = false;
-
 	/* Disable compression */
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
@@ -230,19 +220,14 @@
 	POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_activate(struct intel_crtc *crtc)
+static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
 	u32 dpfc_ctl;
 	int threshold = dev_priv->fbc.threshold;
-	unsigned int y_offset;
 
-	dev_priv->fbc.active = true;
-
-	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
-	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
+	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
 		threshold++;
 
 	switch (threshold) {
@@ -259,18 +244,17 @@
 	}
 	dpfc_ctl |= DPFC_CTL_FENCE_EN;
 	if (IS_GEN5(dev_priv))
-		dpfc_ctl |= obj->fence_reg;
+		dpfc_ctl |= params->fb.fence_reg;
 
-	y_offset = get_crtc_fence_y_offset(crtc);
-	I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
-	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
 	if (IS_GEN6(dev_priv)) {
 		I915_WRITE(SNB_DPFC_CTL_SA,
-			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-		I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
+			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 	}
 
 	intel_fbc_recompress(dev_priv);
@@ -280,8 +264,6 @@
 {
 	u32 dpfc_ctl;
 
-	dev_priv->fbc.active = false;
-
 	/* Disable compression */
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
@@ -295,21 +277,17 @@
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_activate(struct intel_crtc *crtc)
+static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
 	u32 dpfc_ctl;
 	int threshold = dev_priv->fbc.threshold;
 
-	dev_priv->fbc.active = true;
-
 	dpfc_ctl = 0;
 	if (IS_IVYBRIDGE(dev_priv))
-		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
+		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
 
-	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
 		threshold++;
 
 	switch (threshold) {
@@ -337,20 +315,60 @@
 			   ILK_FBCQ_DIS);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
-			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
+		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
+			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
 			   HSW_FBCQ_DIS);
 	}
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
 	I915_WRITE(SNB_DPFC_CTL_SA,
-		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
+		   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+	I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 
 	intel_fbc_recompress(dev_priv);
 }
 
+static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 5)
+		return ilk_fbc_is_active(dev_priv);
+	else if (IS_GM45(dev_priv))
+		return g4x_fbc_is_active(dev_priv);
+	else
+		return i8xx_fbc_is_active(dev_priv);
+}
+
+static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	fbc->active = true;
+
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		gen7_fbc_activate(dev_priv);
+	else if (INTEL_INFO(dev_priv)->gen >= 5)
+		ilk_fbc_activate(dev_priv);
+	else if (IS_GM45(dev_priv))
+		g4x_fbc_activate(dev_priv);
+	else
+		i8xx_fbc_activate(dev_priv);
+}
+
+static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	fbc->active = false;
+
+	if (INTEL_INFO(dev_priv)->gen >= 5)
+		ilk_fbc_deactivate(dev_priv);
+	else if (IS_GM45(dev_priv))
+		g4x_fbc_deactivate(dev_priv);
+	else
+		i8xx_fbc_deactivate(dev_priv);
+}
+
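/*
 * With the per-generation dispatch centralized in the two helpers
 * above, the fbc.activate/fbc.deactivate function pointers go away,
 * which is what lets fbc_supported() earlier in this patch collapse to
 * a plain HAS_FBC() check.
 */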
 /**
  * intel_fbc_is_active - Is FBC active?
  * @dev_priv: i915 device instance
@@ -364,24 +382,24 @@
 	return dev_priv->fbc.active;
 }
 
-static void intel_fbc_activate(const struct drm_framebuffer *fb)
-{
-	struct drm_i915_private *dev_priv = fb->dev->dev_private;
-	struct intel_crtc *crtc = dev_priv->fbc.crtc;
-
-	dev_priv->fbc.activate(crtc);
-
-	dev_priv->fbc.fb_id = fb->base.id;
-	dev_priv->fbc.y = crtc->base.y;
-}
-
 static void intel_fbc_work_fn(struct work_struct *__work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(__work, struct drm_i915_private, fbc.work.work);
-	struct intel_fbc_work *work = &dev_priv->fbc.work;
-	struct intel_crtc *crtc = dev_priv->fbc.crtc;
-	int delay_ms = 50;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_work *work = &fbc->work;
+	struct intel_crtc *crtc = fbc->crtc;
+	struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
+
+	if (drm_crtc_vblank_get(&crtc->base)) {
+		DRM_ERROR("vblank not available for FBC on pipe %c\n",
+			  pipe_name(crtc->pipe));
+
+		mutex_lock(&fbc->lock);
+		work->scheduled = false;
+		mutex_unlock(&fbc->lock);
+		return;
+	}
 
 retry:
 	/* Delay the actual enabling to let pageflipping cease and the
@@ -390,142 +408,97 @@
 	 * vblank to pass after disabling the FBC before we attempt
 	 * to modify the control registers.
 	 *
-	 * A more complicated solution would involve tracking vblanks
-	 * following the termination of the page-flipping sequence
-	 * and indeed performing the enable as a co-routine and not
-	 * waiting synchronously upon the vblank.
-	 *
 	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+	 *
+	 * It is also worth mentioning that since work->scheduled_vblank can be
+	 * updated multiple times by the other threads, hitting the timeout is
+	 * not an error condition. We'll just end up hitting the "goto retry"
+	 * case below.
 	 */
-	wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
+	wait_event_timeout(vblank->queue,
+		drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
+		msecs_to_jiffies(50));
 
-	mutex_lock(&dev_priv->fbc.lock);
+	mutex_lock(&fbc->lock);
 
 	/* Were we cancelled? */
 	if (!work->scheduled)
 		goto out;
 
 	/* Were we delayed again while this function was sleeping? */
-	if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
-		       jiffies)) {
-		mutex_unlock(&dev_priv->fbc.lock);
+	if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
+		mutex_unlock(&fbc->lock);
 		goto retry;
 	}
 
-	if (crtc->base.primary->fb == work->fb)
-		intel_fbc_activate(work->fb);
+	intel_fbc_hw_activate(dev_priv);
 
 	work->scheduled = false;
 
 out:
-	mutex_unlock(&dev_priv->fbc.lock);
-}
-
-static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
-{
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-	dev_priv->fbc.work.scheduled = false;
+	mutex_unlock(&fbc->lock);
+	drm_crtc_vblank_put(&crtc->base);
 }
 
 static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct intel_fbc_work *work = &dev_priv->fbc.work;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_work *work = &fbc->work;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+	WARN_ON(!mutex_is_locked(&fbc->lock));
 
-	/* It is useless to call intel_fbc_cancel_work() in this function since
-	 * we're not releasing fbc.lock, so it won't have an opportunity to grab
-	 * it to discover that it was cancelled. So we just update the expected
-	 * jiffy count. */
-	work->fb = crtc->base.primary->fb;
+	if (drm_crtc_vblank_get(&crtc->base)) {
+		DRM_ERROR("vblank not available for FBC on pipe %c\n",
+			  pipe_name(crtc->pipe));
+		return;
+	}
+
+	/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
+	 * this function since we're not releasing fbc.lock, so it won't have an
+	 * opportunity to grab it to discover that it was cancelled. So we just
+	 * update the expected vblank count. */
 	work->scheduled = true;
-	work->enable_jiffies = jiffies;
+	work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
+	drm_crtc_vblank_put(&crtc->base);
 
 	schedule_work(&work->work);
 }
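/*
 * The handshake between intel_fbc_schedule_activation() and the work
 * function, in short: the scheduler records the current vblank count in
 * work->scheduled_vblank; the worker sleeps until the count moves past
 * it, then re-checks work->scheduled under fbc->lock, so a deactivation
 * in between simply makes the worker bail out.
 */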
 
-static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	intel_fbc_cancel_work(dev_priv);
+	WARN_ON(!mutex_is_locked(&fbc->lock));
 
-	if (dev_priv->fbc.active)
-		dev_priv->fbc.deactivate(dev_priv);
+	/* Calling cancel_work() here won't help because the work function
+	 * grabs fbc->lock. Just set scheduled to false so the work function
+	 * can know it was cancelled. */
+	fbc->work.scheduled = false;
+
+	if (fbc->active)
+		intel_fbc_hw_deactivate(dev_priv);
 }
 
-/*
- * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
- * @crtc: the CRTC
- *
- * This function deactivates FBC if it's associated with the provided CRTC.
- */
-void intel_fbc_deactivate(struct intel_crtc *crtc)
+static bool multiple_pipes_ok(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_plane *primary = crtc->base.primary;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	enum pipe pipe = crtc->pipe;
 
-	if (!fbc_supported(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->fbc.lock);
-	if (dev_priv->fbc.crtc == crtc)
-		__intel_fbc_deactivate(dev_priv);
-	mutex_unlock(&dev_priv->fbc.lock);
-}
-
-static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
-			      const char *reason)
-{
-	if (dev_priv->fbc.no_fbc_reason == reason)
-		return;
-
-	dev_priv->fbc.no_fbc_reason = reason;
-	DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
-}
-
-static bool crtc_can_fbc(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
-	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
-		return false;
-
-	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
-		return false;
-
-	return true;
-}
-
-static bool crtc_is_valid(struct intel_crtc *crtc)
-{
-	if (!intel_crtc_active(&crtc->base))
-		return false;
-
-	if (!to_intel_plane_state(crtc->base.primary->state)->visible)
-		return false;
-
-	return true;
-}
-
-static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
-{
-	enum pipe pipe;
-	int n_pipes = 0;
-	struct drm_crtc *crtc;
-
-	if (INTEL_INFO(dev_priv)->gen > 4)
+	/* Don't even bother tracking anything we don't need. */
+	if (!no_fbc_on_multiple_pipes(dev_priv))
 		return true;
 
-	for_each_pipe(dev_priv, pipe) {
-		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	WARN_ON(!drm_modeset_is_locked(&primary->mutex));
 
-		if (intel_crtc_active(crtc) &&
-		    to_intel_plane_state(crtc->primary->state)->visible)
-			n_pipes++;
-	}
+	if (to_intel_plane_state(primary->state)->visible)
+		fbc->visible_pipes_mask |= (1 << pipe);
+	else
+		fbc->visible_pipes_mask &= ~(1 << pipe);
 
-	return (n_pipes < 2);
+	return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0;
 }
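/*
 * visible_pipes_mask by example: with the primary planes of pipes A and
 * B both visible the mask is 0b011, so a call for pipe A computes
 * 0b011 & ~0b001 = 0b010, a nonzero value, and multiple_pipes_ok()
 * returns false -- FBC stays off on these gen <= 3 platforms while a
 * second pipe is showing anything.
 */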
 
 static int find_compression_threshold(struct drm_i915_private *dev_priv,
@@ -581,16 +554,16 @@
 static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb = crtc->base.primary->state->fb;
+	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct drm_mm_node *uninitialized_var(compressed_llb);
 	int size, fb_cpp, ret;
 
-	WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
+	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
 
-	size = intel_fbc_calculate_cfb_size(crtc, fb);
-	fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
+	fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);
 
-	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
 					 size, fb_cpp);
 	if (!ret)
 		goto err_llb;
@@ -599,12 +572,12 @@
 
 	}
 
-	dev_priv->fbc.threshold = ret;
+	fbc->threshold = ret;
 
 	if (INTEL_INFO(dev_priv)->gen >= 5)
-		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
 	else if (IS_GM45(dev_priv)) {
-		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
 	} else {
 		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
 		if (!compressed_llb)
@@ -615,23 +588,22 @@
 		if (ret)
 			goto err_fb;
 
-		dev_priv->fbc.compressed_llb = compressed_llb;
+		fbc->compressed_llb = compressed_llb;
 
 		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+			   dev_priv->mm.stolen_base + fbc->compressed_fb.start);
 		I915_WRITE(FBC_LL_BASE,
 			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
 	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
-		      dev_priv->fbc.compressed_fb.size,
-		      dev_priv->fbc.threshold);
+		      fbc->compressed_fb.size, fbc->threshold);
 
 	return 0;
 
 err_fb:
 	kfree(compressed_llb);
-	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
 err_llb:
 	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
 	return -ENOSPC;
@@ -639,25 +611,27 @@
 
 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
-	if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
-		i915_gem_stolen_remove_node(dev_priv,
-					    &dev_priv->fbc.compressed_fb);
+	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (dev_priv->fbc.compressed_llb) {
-		i915_gem_stolen_remove_node(dev_priv,
-					    dev_priv->fbc.compressed_llb);
-		kfree(dev_priv->fbc.compressed_llb);
+	if (drm_mm_node_allocated(&fbc->compressed_fb))
+		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+
+	if (fbc->compressed_llb) {
+		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
+		kfree(fbc->compressed_llb);
 	}
 }
 
 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
 	if (!fbc_supported(dev_priv))
 		return;
 
-	mutex_lock(&dev_priv->fbc.lock);
+	mutex_lock(&fbc->lock);
 	__intel_fbc_cleanup_cfb(dev_priv);
-	mutex_unlock(&dev_priv->fbc.lock);
+	mutex_unlock(&fbc->lock);
 }
 
 static bool stride_is_valid(struct drm_i915_private *dev_priv,
@@ -681,19 +655,17 @@
 	return true;
 }
 
-static bool pixel_format_is_valid(struct drm_framebuffer *fb)
+static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
+				  uint32_t pixel_format)
 {
-	struct drm_device *dev = fb->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	switch (fb->pixel_format) {
+	switch (pixel_format) {
 	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_XBGR8888:
 		return true;
 	case DRM_FORMAT_XRGB1555:
 	case DRM_FORMAT_RGB565:
 		/* 16bpp not supported on gen2 */
-		if (IS_GEN2(dev))
+		if (IS_GEN2(dev_priv))
 			return false;
 		/* WaFbcOnly1to1Ratio:ctg */
 		if (IS_G4X(dev_priv))
@@ -713,6 +685,7 @@
 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
 	unsigned int effective_w, effective_h, max_w, max_h;
 
 	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
@@ -726,87 +699,105 @@
 		max_h = 1536;
 	}
 
-	intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
+	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
+					&effective_h);
 	effective_w += crtc->adjusted_x;
 	effective_h += crtc->adjusted_y;
 
 	return effective_w <= max_w && effective_h <= max_h;
 }
 
-/**
- * __intel_fbc_update - activate/deactivate FBC as needed, unlocked
- * @crtc: the CRTC that triggered the update
- *
- * This function completely reevaluates the status of FBC, then activates,
- * deactivates or maintains it on the same state.
- */
-static void __intel_fbc_update(struct intel_crtc *crtc)
+static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-	struct drm_framebuffer *fb;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+	struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->base.state);
+	struct intel_plane_state *plane_state =
+		to_intel_plane_state(crtc->base.primary->state);
+	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj;
-	const struct drm_display_mode *adjusted_mode;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+	WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
+	WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
 
-	if (!multiple_pipes_ok(dev_priv)) {
-		set_no_fbc_reason(dev_priv, "more than one pipe active");
-		goto out_disable;
-	}
+	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		cache->crtc.hsw_bdw_pixel_rate =
+			ilk_pipe_pixel_rate(crtc_state);
 
-	if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
+	cache->plane.rotation = plane_state->base.rotation;
+	cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
+	cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
+	cache->plane.visible = plane_state->visible;
+
+	if (!cache->plane.visible)
 		return;
 
-	if (!crtc_is_valid(crtc)) {
-		set_no_fbc_reason(dev_priv, "no output");
-		goto out_disable;
+	obj = intel_fb_obj(fb);
+
+	/* FIXME: We lack the proper locking here, so only run this on the
+	 * platforms that need it. */
+	if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+		cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
+	cache->fb.pixel_format = fb->pixel_format;
+	cache->fb.stride = fb->pitches[0];
+	cache->fb.fence_reg = obj->fence_reg;
+	cache->fb.tiling_mode = obj->tiling_mode;
+}
+
+static bool intel_fbc_can_activate(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+	if (!cache->plane.visible) {
+		fbc->no_fbc_reason = "primary plane not visible";
+		return false;
 	}
 
-	fb = crtc->base.primary->fb;
-	obj = intel_fb_obj(fb);
-	adjusted_mode = &crtc->config->base.adjusted_mode;
-
-	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-		set_no_fbc_reason(dev_priv, "incompatible mode");
-		goto out_disable;
+	if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
+		fbc->no_fbc_reason = "incompatible mode";
+		return false;
 	}
 
 	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
-		set_no_fbc_reason(dev_priv, "mode too large for compression");
-		goto out_disable;
+		fbc->no_fbc_reason = "mode too large for compression";
+		return false;
 	}
 
 	/* The use of a CPU fence is mandatory in order to detect writes
 	 * by the CPU to the scanout and trigger updates to the FBC.
 	 */
-	if (obj->tiling_mode != I915_TILING_X ||
-	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
-		goto out_disable;
+	if (cache->fb.tiling_mode != I915_TILING_X ||
+	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
+		return false;
 	}
 	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
-	    crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
-		set_no_fbc_reason(dev_priv, "rotation unsupported");
-		goto out_disable;
+	    cache->plane.rotation != BIT(DRM_ROTATE_0)) {
+		fbc->no_fbc_reason = "rotation unsupported";
+		return false;
 	}
 
-	if (!stride_is_valid(dev_priv, fb->pitches[0])) {
-		set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
-		goto out_disable;
+	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
+		fbc->no_fbc_reason = "framebuffer stride not supported";
+		return false;
 	}
 
-	if (!pixel_format_is_valid(fb)) {
-		set_no_fbc_reason(dev_priv, "pixel format is invalid");
-		goto out_disable;
+	if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
+		fbc->no_fbc_reason = "pixel format is invalid";
+		return false;
 	}
 
 	/* WaFbcExceedCdClockThreshold:hsw,bdw */
 	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
-	    ilk_pipe_pixel_rate(crtc->config) >=
-	    dev_priv->cdclk_freq * 95 / 100) {
-		set_no_fbc_reason(dev_priv, "pixel rate is too big");
-		goto out_disable;
+	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
+		fbc->no_fbc_reason = "pixel rate is too big";
+		return false;
 	}
 
 	/* It is possible for the required CFB size change without a
@@ -819,189 +810,322 @@
 	 * we didn't get any invalidate/deactivate calls, but this would require
 	 * a lot of tracking just for a specific case. If we conclude it's an
 	 * important case, we can implement it later. */
-	if (intel_fbc_calculate_cfb_size(crtc, fb) >
-	    dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
-		set_no_fbc_reason(dev_priv, "CFB requirements changed");
-		goto out_disable;
+	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+	    fbc->compressed_fb.size * fbc->threshold) {
+		fbc->no_fbc_reason = "CFB requirements changed";
+		return false;
 	}
 
+	return true;
+}
+
+static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	bool enable_by_default = IS_HASWELL(dev_priv) ||
+				 IS_BROADWELL(dev_priv);
+
+	if (intel_vgpu_active(dev_priv->dev)) {
+		fbc->no_fbc_reason = "VGPU is active";
+		return false;
+	}
+
+	if (i915.enable_fbc < 0 && !enable_by_default) {
+		fbc->no_fbc_reason = "disabled per chip default";
+		return false;
+	}
+
+	if (!i915.enable_fbc) {
+		fbc->no_fbc_reason = "disabled per module param";
+		return false;
+	}
+
+	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
+		fbc->no_fbc_reason = "no enabled pipes can have FBC";
+		return false;
+	}
+
+	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
+		fbc->no_fbc_reason = "no enabled planes can have FBC";
+		return false;
+	}
+
+	return true;
+}
+
+static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
+				     struct intel_fbc_reg_params *params)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+	/* Since all our fields are integer types, use memset here so the
+	 * comparison function can rely on memcmp because the padding will be
+	 * zero. */
+	memset(params, 0, sizeof(*params));
+
+	params->crtc.pipe = crtc->pipe;
+	params->crtc.plane = crtc->plane;
+	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
+
+	params->fb.pixel_format = cache->fb.pixel_format;
+	params->fb.stride = cache->fb.stride;
+	params->fb.fence_reg = cache->fb.fence_reg;
+
+	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
+
+	params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
+}
+
+static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
+				       struct intel_fbc_reg_params *params2)
+{
+	/* We can use this since intel_fbc_get_reg_params() does a memset. */
+	return memcmp(params1, params2, sizeof(*params1)) == 0;
+}
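/*
 * Why the memset() in intel_fbc_get_reg_params() matters: field-wise
 * assignment leaves struct padding bytes undefined, and C initializers
 * are not required to zero padding either, so a memcmp() of two
 * otherwise-equal structs could spuriously differ. Example layout with
 * a hypothetical struct:
 *
 *	struct p { u8 a; u32 b; };	three padding bytes follow 'a'
 *
 * memset() zeroes those bytes deterministically, which is what makes
 * the memcmp() in intel_fbc_reg_params_equal() a valid equality test.
 */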
+
+void intel_fbc_pre_update(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (!fbc_supported(dev_priv))
+		return;
+
+	mutex_lock(&fbc->lock);
+
+	if (!multiple_pipes_ok(crtc)) {
+		fbc->no_fbc_reason = "more than one pipe active";
+		goto deactivate;
+	}
+
+	if (!fbc->enabled || fbc->crtc != crtc)
+		goto unlock;
+
+	intel_fbc_update_state_cache(crtc);
+
+deactivate:
+	intel_fbc_deactivate(dev_priv);
+unlock:
+	mutex_unlock(&fbc->lock);
+}
+
+static void __intel_fbc_post_update(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_reg_params old_params;
+
+	WARN_ON(!mutex_is_locked(&fbc->lock));
+
+	if (!fbc->enabled || fbc->crtc != crtc)
+		return;
+
+	if (!intel_fbc_can_activate(crtc)) {
+		WARN_ON(fbc->active);
+		return;
+	}
+
+	old_params = fbc->params;
+	intel_fbc_get_reg_params(crtc, &fbc->params);
+
 	/* If the scanout has not changed, don't modify the FBC settings.
 	 * Note that we make the fundamental assumption that the fb->obj
 	 * cannot be unpinned (and have its GTT offset and fence revoked)
 	 * without first being decoupled from the scanout and FBC disabled.
 	 */
-	if (dev_priv->fbc.crtc == crtc &&
-	    dev_priv->fbc.fb_id == fb->base.id &&
-	    dev_priv->fbc.y == crtc->base.y &&
-	    dev_priv->fbc.active)
+	if (fbc->active &&
+	    intel_fbc_reg_params_equal(&old_params, &fbc->params))
 		return;
 
-	if (intel_fbc_is_active(dev_priv)) {
-		/* We update FBC along two paths, after changing fb/crtc
-		 * configuration (modeswitching) and after page-flipping
-		 * finishes. For the latter, we know that not only did
-		 * we disable the FBC at the start of the page-flip
-		 * sequence, but also more than one vblank has passed.
-		 *
-		 * For the former case of modeswitching, it is possible
-		 * to switch between two FBC valid configurations
-		 * instantaneously so we do need to disable the FBC
-		 * before we can modify its control registers. We also
-		 * have to wait for the next vblank for that to take
-		 * effect. However, since we delay enabling FBC we can
-		 * assume that a vblank has passed since disabling and
-		 * that we can safely alter the registers in the deferred
-		 * callback.
-		 *
-		 * In the scenario that we go from a valid to invalid
-		 * and then back to valid FBC configuration we have
-		 * no strict enforcement that a vblank occurred since
-		 * disabling the FBC. However, along all current pipe
-		 * disabling paths we do need to wait for a vblank at
-		 * some point. And we wait before enabling FBC anyway.
-		 */
-		DRM_DEBUG_KMS("deactivating FBC for update\n");
-		__intel_fbc_deactivate(dev_priv);
-	}
-
+	intel_fbc_deactivate(dev_priv);
 	intel_fbc_schedule_activation(crtc);
-	dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
-	return;
-
-out_disable:
-	/* Multiple disables should be harmless */
-	if (intel_fbc_is_active(dev_priv)) {
-		DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
-		__intel_fbc_deactivate(dev_priv);
-	}
+	fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
 }
 
-/*
- * intel_fbc_update - activate/deactivate FBC as needed
- * @crtc: the CRTC that triggered the update
- *
- * This function reevaluates the overall state and activates or deactivates FBC.
- */
-void intel_fbc_update(struct intel_crtc *crtc)
+void intel_fbc_post_update(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
 
 	if (!fbc_supported(dev_priv))
 		return;
 
-	mutex_lock(&dev_priv->fbc.lock);
-	__intel_fbc_update(crtc);
-	mutex_unlock(&dev_priv->fbc.lock);
+	mutex_lock(&fbc->lock);
+	__intel_fbc_post_update(crtc);
+	mutex_unlock(&fbc->lock);
+}
+
+static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
+{
+	if (fbc->enabled)
+		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
+	else
+		return fbc->possible_framebuffer_bits;
 }
 
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin)
 {
-	unsigned int fbc_bits;
+	struct intel_fbc *fbc = &dev_priv->fbc;
 
 	if (!fbc_supported(dev_priv))
 		return;
 
-	if (origin == ORIGIN_GTT)
+	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
 		return;
 
-	mutex_lock(&dev_priv->fbc.lock);
+	mutex_lock(&fbc->lock);
 
-	if (dev_priv->fbc.enabled)
-		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
-	else
-		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
+	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
 
-	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
+	if (fbc->enabled && fbc->busy_bits)
+		intel_fbc_deactivate(dev_priv);
 
-	if (dev_priv->fbc.busy_bits)
-		__intel_fbc_deactivate(dev_priv);
-
-	mutex_unlock(&dev_priv->fbc.lock);
+	mutex_unlock(&fbc->lock);
 }
 
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
 		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
 {
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
 	if (!fbc_supported(dev_priv))
 		return;
 
-	if (origin == ORIGIN_GTT)
+	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
 		return;
 
-	mutex_lock(&dev_priv->fbc.lock);
+	mutex_lock(&fbc->lock);
 
-	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
+	fbc->busy_bits &= ~frontbuffer_bits;
 
-	if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
-		if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
+	if (!fbc->busy_bits && fbc->enabled &&
+	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
+		if (fbc->active)
 			intel_fbc_recompress(dev_priv);
-		} else {
-			__intel_fbc_deactivate(dev_priv);
-			__intel_fbc_update(dev_priv->fbc.crtc);
+		else
+			__intel_fbc_post_update(fbc->crtc);
+	}
+
+	mutex_unlock(&fbc->lock);
+}
+
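The invalidate/flush pair above is a small dirty-bit handshake: intel_fbc_invalidate() ORs the relevant frontbuffer bits into busy_bits and deactivates compression, and intel_fbc_flush() clears them and reactivates (or recompresses) once nothing FBC cares about is still busy. A toy model of that protocol, with made-up bit values standing in for the frontbuffer bits:

#include <stdio.h>

#define FB_PRIMARY_A	(1u << 0)
#define FB_PRIMARY_B	(1u << 1)

static unsigned int busy_bits;
static unsigned int tracked_bits = FB_PRIMARY_A;	/* bits FBC cares about */
static int active = 1;

static void invalidate(unsigned int bits)
{
	busy_bits |= tracked_bits & bits;
	if (busy_bits)
		active = 0;		/* deactivate while the CPU is writing */
}

static void flush(unsigned int bits)
{
	busy_bits &= ~bits;
	if (!busy_bits && (bits & tracked_bits))
		active = 1;		/* safe to re-enable compression */
}

int main(void)
{
	invalidate(FB_PRIMARY_A);
	printf("after invalidate: active=%d busy=%#x\n", active, busy_bits);
	flush(FB_PRIMARY_A);
	printf("after flush:      active=%d busy=%#x\n", active, busy_bits);
	return 0;
}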
+/**
+ * intel_fbc_choose_crtc - select a CRTC to enable FBC on
+ * @dev_priv: i915 device instance
+ * @state: the atomic state structure
+ *
+ * This function looks at the proposed state for CRTCs and planes, then chooses
+ * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
+ * true.
+ *
+ * Later, intel_fbc_enable() will check state->enable_fbc and, if possible,
+ * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
+ */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+			   struct drm_atomic_state *state)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *plane_state;
+	bool fbc_crtc_present = false;
+	int i, j;
+
+	mutex_lock(&fbc->lock);
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (fbc->crtc == to_intel_crtc(crtc)) {
+			fbc_crtc_present = true;
+			break;
+		}
+	}
+	/* This atomic commit doesn't involve the CRTC currently tied to FBC. */
+	if (!fbc_crtc_present && fbc->crtc != NULL)
+		goto out;
+
+	/* Simply choose the first CRTC that is compatible and has a visible
+	 * plane. We could go for fancier schemes such as checking the plane
+	 * size, but this would just affect the few platforms that don't tie FBC
+	 * to pipe or plane A. */
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		struct intel_plane_state *intel_plane_state =
+			to_intel_plane_state(plane_state);
+
+		if (!intel_plane_state->visible)
+			continue;
+
+		for_each_crtc_in_state(state, crtc, crtc_state, j) {
+			struct intel_crtc_state *intel_crtc_state =
+				to_intel_crtc_state(crtc_state);
+
+			if (plane_state->crtc != crtc)
+				continue;
+
+			if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
+				break;
+
+			intel_crtc_state->enable_fbc = true;
+			goto out;
 		}
 	}
 
-	mutex_unlock(&dev_priv->fbc.lock);
+out:
+	mutex_unlock(&fbc->lock);
 }
 
 /**
  * intel_fbc_enable: tries to enable FBC on the CRTC
  * @crtc: the CRTC
  *
- * This function checks if it's possible to enable FBC on the following CRTC,
- * then enables it. Notice that it doesn't activate FBC.
+ * This function checks if the given CRTC was chosen for FBC, then enables it if
+ * possible. Notice that it doesn't activate FBC. It is valid to call
+ * intel_fbc_enable() multiple times for the same pipe without an
+ * intel_fbc_disable() in the middle, as long as FBC is deactivated.
  */
 void intel_fbc_enable(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
 
 	if (!fbc_supported(dev_priv))
 		return;
 
-	mutex_lock(&dev_priv->fbc.lock);
+	mutex_lock(&fbc->lock);
 
-	if (dev_priv->fbc.enabled) {
-		WARN_ON(dev_priv->fbc.crtc == crtc);
+	if (fbc->enabled) {
+		WARN_ON(fbc->crtc == NULL);
+		if (fbc->crtc == crtc) {
+			WARN_ON(!crtc->config->enable_fbc);
+			WARN_ON(fbc->active);
+		}
 		goto out;
 	}
 
-	WARN_ON(dev_priv->fbc.active);
-	WARN_ON(dev_priv->fbc.crtc != NULL);
-
-	if (intel_vgpu_active(dev_priv->dev)) {
-		set_no_fbc_reason(dev_priv, "VGPU is active");
+	if (!crtc->config->enable_fbc)
 		goto out;
-	}
 
-	if (i915.enable_fbc < 0) {
-		set_no_fbc_reason(dev_priv, "disabled per chip default");
-		goto out;
-	}
+	WARN_ON(fbc->active);
+	WARN_ON(fbc->crtc != NULL);
 
-	if (!i915.enable_fbc) {
-		set_no_fbc_reason(dev_priv, "disabled per module param");
-		goto out;
-	}
-
-	if (!crtc_can_fbc(crtc)) {
-		set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
-		goto out;
-	}
-
+	intel_fbc_update_state_cache(crtc);
 	if (intel_fbc_alloc_cfb(crtc)) {
-		set_no_fbc_reason(dev_priv, "not enough stolen memory");
+		fbc->no_fbc_reason = "not enough stolen memory";
 		goto out;
 	}
 
 	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
-	dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n";
+	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
 
-	dev_priv->fbc.enabled = true;
-	dev_priv->fbc.crtc = crtc;
+	fbc->enabled = true;
+	fbc->crtc = crtc;
 out:
-	mutex_unlock(&dev_priv->fbc.lock);
+	mutex_unlock(&fbc->lock);
 }
 
 /**
@@ -1013,58 +1137,88 @@
  */
 static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct intel_crtc *crtc = dev_priv->fbc.crtc;
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_crtc *crtc = fbc->crtc;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-	WARN_ON(!dev_priv->fbc.enabled);
-	WARN_ON(dev_priv->fbc.active);
-	assert_pipe_disabled(dev_priv, crtc->pipe);
+	WARN_ON(!mutex_is_locked(&fbc->lock));
+	WARN_ON(!fbc->enabled);
+	WARN_ON(fbc->active);
+	WARN_ON(crtc->active);
 
 	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 
 	__intel_fbc_cleanup_cfb(dev_priv);
 
-	dev_priv->fbc.enabled = false;
-	dev_priv->fbc.crtc = NULL;
+	fbc->enabled = false;
+	fbc->crtc = NULL;
 }
 
 /**
- * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * intel_fbc_disable - disable FBC if it's associated with crtc
  * @crtc: the CRTC
  *
  * This function disables FBC if it's associated with the provided CRTC.
  */
-void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+void intel_fbc_disable(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct intel_fbc *fbc = &dev_priv->fbc;
 
 	if (!fbc_supported(dev_priv))
 		return;
 
-	mutex_lock(&dev_priv->fbc.lock);
-	if (dev_priv->fbc.crtc == crtc) {
-		WARN_ON(!dev_priv->fbc.enabled);
-		WARN_ON(dev_priv->fbc.active);
+	mutex_lock(&fbc->lock);
+	if (fbc->crtc == crtc) {
+		WARN_ON(!fbc->enabled);
+		WARN_ON(fbc->active);
 		__intel_fbc_disable(dev_priv);
 	}
-	mutex_unlock(&dev_priv->fbc.lock);
+	mutex_unlock(&fbc->lock);
+
+	cancel_work_sync(&fbc->work.work);
 }
 
 /**
- * intel_fbc_disable - globally disable FBC
+ * intel_fbc_global_disable - globally disable FBC
  * @dev_priv: i915 device instance
  *
  * This function disables FBC regardless of which CRTC is associated with it.
  */
-void intel_fbc_disable(struct drm_i915_private *dev_priv)
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
 {
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
 	if (!fbc_supported(dev_priv))
 		return;
 
-	mutex_lock(&dev_priv->fbc.lock);
-	if (dev_priv->fbc.enabled)
+	mutex_lock(&fbc->lock);
+	if (fbc->enabled)
 		__intel_fbc_disable(dev_priv);
-	mutex_unlock(&dev_priv->fbc.lock);
+	mutex_unlock(&fbc->lock);
+
+	cancel_work_sync(&fbc->work.work);
+}
+
+/**
+ * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
+ * @dev_priv: i915 device instance
+ *
+ * The FBC code needs to track CRTC visibility since the older platforms can't
+ * have FBC enabled while multiple pipes are used. This function does the
+ * initial setup at driver load to make sure FBC matches the real hardware.
+ */
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
+{
+	struct intel_crtc *crtc;
+
+	/* Don't even bother tracking anything if we don't need to. */
+	if (!no_fbc_on_multiple_pipes(dev_priv))
+		return;
+
+	for_each_intel_crtc(dev_priv->dev, crtc)
+		if (intel_crtc_active(&crtc->base) &&
+		    to_intel_plane_state(crtc->base.primary->state)->visible)
+			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
 }
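intel_fbc_init_pipe_state() boils each active CRTC with a visible primary plane down to one bit in visible_pipes_mask, so the multiple-pipes restriction can later be tested with simple bit arithmetic. The same idea in isolation, with a hypothetical pipe array instead of the driver's types:

#include <stdio.h>

struct pipe_state { int active; int primary_visible; };

/* Build a mask with one bit per pipe that is both active and visible. */
static unsigned int visible_pipes_mask(const struct pipe_state *p, int n)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < n; i++)
		if (p[i].active && p[i].primary_visible)
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	struct pipe_state pipes[3] = { {1, 1}, {1, 0}, {0, 1} };
	unsigned int mask = visible_pipes_mask(pipes, 3);

	/* mask & (mask - 1) is nonzero iff more than one bit is set. */
	printf("mask=%#x multiple=%d\n", mask, (mask & (mask - 1)) != 0);
	return 0;
}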
 
 /**
@@ -1075,51 +1229,35 @@
  */
 void intel_fbc_init(struct drm_i915_private *dev_priv)
 {
+	struct intel_fbc *fbc = &dev_priv->fbc;
 	enum pipe pipe;
 
-	INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
-	mutex_init(&dev_priv->fbc.lock);
-	dev_priv->fbc.enabled = false;
-	dev_priv->fbc.active = false;
-	dev_priv->fbc.work.scheduled = false;
+	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
+	mutex_init(&fbc->lock);
+	fbc->enabled = false;
+	fbc->active = false;
+	fbc->work.scheduled = false;
 
 	if (!HAS_FBC(dev_priv)) {
-		dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
+		fbc->no_fbc_reason = "unsupported by this chipset";
 		return;
 	}
 
 	for_each_pipe(dev_priv, pipe) {
-		dev_priv->fbc.possible_framebuffer_bits |=
+		fbc->possible_framebuffer_bits |=
 				INTEL_FRONTBUFFER_PRIMARY(pipe);
 
 		if (fbc_on_pipe_a_only(dev_priv))
 			break;
 	}
 
-	if (INTEL_INFO(dev_priv)->gen >= 7) {
-		dev_priv->fbc.is_active = ilk_fbc_is_active;
-		dev_priv->fbc.activate = gen7_fbc_activate;
-		dev_priv->fbc.deactivate = ilk_fbc_deactivate;
-	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
-		dev_priv->fbc.is_active = ilk_fbc_is_active;
-		dev_priv->fbc.activate = ilk_fbc_activate;
-		dev_priv->fbc.deactivate = ilk_fbc_deactivate;
-	} else if (IS_GM45(dev_priv)) {
-		dev_priv->fbc.is_active = g4x_fbc_is_active;
-		dev_priv->fbc.activate = g4x_fbc_activate;
-		dev_priv->fbc.deactivate = g4x_fbc_deactivate;
-	} else {
-		dev_priv->fbc.is_active = i8xx_fbc_is_active;
-		dev_priv->fbc.activate = i8xx_fbc_activate;
-		dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
-
-		/* This value was pulled out of someone's hat */
+	/* This value was pulled out of someone's hat */
+	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
 		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
-	}
 
 	/* We still don't have any sort of hardware state readout for FBC, so
 	 * deactivate it in case the BIOS activated it to make sure software
 	 * matches the hardware state. */
-	if (dev_priv->fbc.is_active(dev_priv))
-		dev_priv->fbc.deactivate(dev_priv);
+	if (intel_fbc_hw_is_active(dev_priv))
+		intel_fbc_hw_deactivate(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index bea75ca..97a91e63 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,7 +119,7 @@
 {
 	struct intel_fbdev *ifbdev =
 		container_of(helper, struct intel_fbdev, helper);
-	struct drm_framebuffer *fb = NULL;
+	struct drm_framebuffer *fb;
 	struct drm_device *dev = helper->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_mode_fb_cmd2 mode_cmd = {};
@@ -171,8 +171,6 @@
 
 out:
 	mutex_unlock(&dev->struct_mutex);
-	if (!IS_ERR_OR_NULL(fb))
-		drm_framebuffer_unreference(fb);
 	return ret;
 }
 
@@ -408,8 +406,8 @@
 			continue;
 		}
 
-		encoder = connector->encoder;
-		if (!encoder || WARN_ON(!encoder->crtc)) {
+		encoder = connector->state->best_encoder;
+		if (!encoder || WARN_ON(!connector->state->crtc)) {
 			if (connector->force > DRM_FORCE_OFF)
 				goto bail;
 
@@ -422,7 +420,7 @@
 
 		num_connectors_enabled++;
 
-		new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
+		new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
 
 		/*
 		 * Make sure we're not trying to drive multiple connectors
@@ -468,17 +466,22 @@
 			 * usually contains. But since our current
 			 * code puts a mode derived from the post-pfit timings
 			 * into crtc->mode this works out correctly.
+			 *
+			 * This is crtc->mode and not crtc->state->mode for the
+			 * fastboot check to work correctly. crtc_state->mode has
+			 * I915_MODE_FLAG_INHERITED, which we clear to force check
+			 * state.
 			 */
 			DRM_DEBUG_KMS("looking for current mode on connector %s\n",
 				      connector->name);
-			modes[i] = &encoder->crtc->mode;
+			modes[i] = &connector->state->crtc->mode;
 		}
 		crtcs[i] = new_crtc;
 
 		DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
 			      connector->name,
-			      pipe_name(to_intel_crtc(encoder->crtc)->pipe),
-			      encoder->crtc->base.id,
+			      pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
+			      connector->state->crtc->base.id,
 			      modes[i]->hdisplay, modes[i]->vdisplay,
 			      modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
 
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 8229522..73002e9 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -43,9 +43,10 @@
 	uint32_t wq_offset;
 	uint32_t wq_size;
 	uint32_t wq_tail;
+	uint32_t wq_head;
 
 	/* GuC submission statistics & status */
-	uint64_t submissions[I915_NUM_RINGS];
+	uint64_t submissions[GUC_MAX_ENGINES_NUM];
 	uint32_t q_fail;
 	uint32_t b_fail;
 	int retcode;
@@ -88,6 +89,8 @@
 	uint32_t log_flags;
 	struct drm_i915_gem_object *log_obj;
 
+	struct drm_i915_gem_object *ads_obj;
+
 	struct drm_i915_gem_object *ctx_pool_obj;
 	struct ida ctx_ids;
 
@@ -103,8 +106,8 @@
 	uint32_t action_fail;		/* Total number of failures	*/
 	int32_t action_err;		/* Last error code		*/
 
-	uint64_t submissions[I915_NUM_RINGS];
-	uint32_t last_seqno[I915_NUM_RINGS];
+	uint64_t submissions[GUC_MAX_ENGINES_NUM];
+	uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
 };
 
 /* intel_guc_loader.c */
@@ -122,5 +125,6 @@
 		    struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_device *dev);
 void i915_guc_submission_fini(struct drm_device *dev);
+int i915_guc_wq_check_space(struct i915_guc_client *client);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 40b2ea5..2de57ff 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,10 +39,18 @@
 #define GUC_CTX_PRIORITY_HIGH		1
 #define GUC_CTX_PRIORITY_KMD_NORMAL	2
 #define GUC_CTX_PRIORITY_NORMAL		3
+#define GUC_CTX_PRIORITY_NUM		4
 
 #define GUC_MAX_GPU_CONTEXTS		1024
 #define	GUC_INVALID_CTX_ID		GUC_MAX_GPU_CONTEXTS
 
+#define GUC_RENDER_ENGINE		0
+#define GUC_VIDEO_ENGINE		1
+#define GUC_BLITTER_ENGINE		2
+#define GUC_VIDEOENHANCE_ENGINE		3
+#define GUC_VIDEO_ENGINE2		4
+#define GUC_MAX_ENGINES_NUM		(GUC_VIDEO_ENGINE2 + 1)
+
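The new GUC_*_ENGINE numbers give the firmware interface its own engine namespace, so arrays in the shared structs are sized by GUC_MAX_ENGINES_NUM rather than the driver's I915_NUM_RINGS and the two can grow independently. A compressed illustration, with the values copied from the defines above:

#include <stdio.h>

/* Firmware-side engine namespace (mirrors the defines above). */
enum guc_engine {
	GUC_RENDER_ENGINE	= 0,
	GUC_VIDEO_ENGINE	= 1,
	GUC_BLITTER_ENGINE	= 2,
	GUC_VIDEOENHANCE_ENGINE	= 3,
	GUC_VIDEO_ENGINE2	= 4,
	GUC_MAX_ENGINES_NUM	= GUC_VIDEO_ENGINE2 + 1,
};

int main(void)
{
	/* Per-engine stats sized by the firmware ABI, not the driver. */
	unsigned long long submissions[GUC_MAX_ENGINES_NUM] = { 0 };

	submissions[GUC_RENDER_ENGINE]++;
	printf("render submissions: %llu\n", submissions[GUC_RENDER_ENGINE]);
	return 0;
}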
 /* Work queue item header definitions */
 #define WQ_STATUS_ACTIVE		1
 #define WQ_STATUS_SUSPENDED		2
@@ -81,11 +89,14 @@
 #define GUC_CTL_CTXINFO			0
 #define   GUC_CTL_CTXNUM_IN16_SHIFT	0
 #define   GUC_CTL_BASE_ADDR_SHIFT	12
+
 #define GUC_CTL_ARAT_HIGH		1
 #define GUC_CTL_ARAT_LOW		2
+
 #define GUC_CTL_DEVICE_INFO		3
 #define   GUC_CTL_GTTYPE_SHIFT		0
 #define   GUC_CTL_COREFAMILY_SHIFT	7
+
 #define GUC_CTL_LOG_PARAMS		4
 #define   GUC_LOG_VALID			(1 << 0)
 #define   GUC_LOG_NOTIFY_ON_HALF_FULL	(1 << 1)
@@ -97,9 +108,12 @@
 #define   GUC_LOG_ISR_PAGES		3
 #define   GUC_LOG_ISR_SHIFT		9
 #define   GUC_LOG_BUF_ADDR_SHIFT	12
+
 #define GUC_CTL_PAGE_FAULT_CONTROL	5
+
 #define GUC_CTL_WA			6
 #define   GUC_CTL_WA_UK_BY_DRIVER	(1 << 3)
+
 #define GUC_CTL_FEATURE			7
 #define   GUC_CTL_VCS2_ENABLED		(1 << 0)
 #define   GUC_CTL_KERNEL_SUBMISSIONS	(1 << 1)
@@ -109,6 +123,7 @@
 #define   GUC_CTL_PREEMPTION_LOG	(1 << 5)
 #define   GUC_CTL_ENABLE_SLPC		(1 << 7)
 #define   GUC_CTL_RESET_ON_PREMPT_FAILURE	(1 << 8)
+
 #define GUC_CTL_DEBUG			8
 #define   GUC_LOG_VERBOSITY_SHIFT	0
 #define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
@@ -118,9 +133,19 @@
 /* Verbosity range-check limits, without the shift */
 #define	  GUC_LOG_VERBOSITY_MIN		0
 #define	  GUC_LOG_VERBOSITY_MAX		3
+#define	  GUC_LOG_VERBOSITY_MASK	0x0000000f
+#define	  GUC_LOG_DESTINATION_MASK	(3 << 4)
+#define   GUC_LOG_DISABLED		(1 << 6)
+#define   GUC_PROFILE_ENABLED		(1 << 7)
+#define   GUC_WQ_TRACK_ENABLED		(1 << 8)
+#define   GUC_ADS_ENABLED		(1 << 9)
+#define   GUC_DEBUG_RESERVED		(1 << 10)
+#define   GUC_ADS_ADDR_SHIFT		11
+#define   GUC_ADS_ADDR_MASK		0xfffff800
+
 #define GUC_CTL_RSRVD			9
 
-#define GUC_CTL_MAX_DWORDS		(GUC_CTL_RSRVD + 1)
+#define GUC_CTL_MAX_DWORDS		(SOFT_SCRATCH_COUNT - 2) /* [1..14] */
 
 /**
  * DOC: GuC Firmware Layout
@@ -267,7 +292,7 @@
 	u64 db_trigger_phy;
 	u16 db_id;
 
-	struct guc_execlist_context lrc[I915_NUM_RINGS];
+	struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
 
 	u8 attribute;
 
@@ -299,6 +324,99 @@
 #define GUC_POWER_D2		3
 #define GUC_POWER_D3		4
 
+/* Scheduling policy settings */
+
+/* Reset engine upon preempt failure */
+#define POLICY_RESET_ENGINE		(1<<0)
+/* Preempt to idle on quantum expiry */
+#define POLICY_PREEMPT_TO_IDLE		(1<<1)
+
+#define POLICY_MAX_NUM_WI		15
+
+struct guc_policy {
+	/* Time for one workload to execute (in microseconds). */
+	u32 execution_quantum;
+	u32 reserved1;
+
+	/* Time to wait for a preemption request to complete before issuing a
+	 * reset (in microseconds). */
+	u32 preemption_time;
+
+	/* How much time to allow the workload to run after the first fault
+	 * is observed, before preempting (in microseconds). */
+	u32 fault_time;
+
+	u32 policy_flags;
+	u32 reserved[2];
+} __packed;
+
+struct guc_policies {
+	struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
+
+	/* How much time (in microseconds) to allow before DPC processing is
+	 * called back via interrupt (to prevent DPC queue drain starvation).
+	 * Typically thousands of microseconds (example only, not granularity). */
+	u32 dpc_promote_time;
+
+	/* Must be set to take these new values. */
+	u32 is_valid;
+
+	/* Max number of work items (WIs) to process per call. A large value
+	 * may keep the command streamer (CS) idle. */
+	u32 max_num_work_items;
+
+	u32 reserved[19];
+} __packed;
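guc_policy and guc_policies are firmware-ABI structures, which is why they are __packed and padded out with reserved fields to fixed sizes. A common companion practice for such structs is pinning the layout with a compile-time assertion; a sketch of that, sized to mirror guc_policy's seven u32 fields (illustrative, not an authoritative statement of the GuC ABI):

#include <stdint.h>
#include <stdio.h>

/* A firmware-ABI struct: packed, with explicit reserved padding. */
struct fw_policy {
	uint32_t execution_quantum;
	uint32_t reserved1;
	uint32_t preemption_time;
	uint32_t fault_time;
	uint32_t policy_flags;
	uint32_t reserved[2];
} __attribute__((packed));

/* Fail the build if the layout ever drifts from the documented size. */
_Static_assert(sizeof(struct fw_policy) == 7 * sizeof(uint32_t),
	       "fw_policy layout changed");

int main(void)
{
	printf("fw_policy is %zu bytes\n", sizeof(struct fw_policy));
	return 0;
}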
+
+/* GuC MMIO reg state struct */
+
+#define GUC_REGSET_FLAGS_NONE		0x0
+#define GUC_REGSET_POWERCYCLE		0x1
+#define GUC_REGSET_MASKED		0x2
+#define GUC_REGSET_ENGINERESET		0x4
+#define GUC_REGSET_SAVE_DEFAULT_VALUE	0x8
+#define GUC_REGSET_SAVE_CURRENT_VALUE	0x10
+
+#define GUC_REGSET_MAX_REGISTERS	25
+#define GUC_MMIO_WHITE_LIST_START	0x24d0
+#define GUC_MMIO_WHITE_LIST_MAX		12
+#define GUC_S3_SAVE_SPACE_PAGES		10
+
+struct guc_mmio_regset {
+	struct __packed {
+		u32 offset;
+		u32 value;
+		u32 flags;
+	} registers[GUC_REGSET_MAX_REGISTERS];
+
+	u32 values_valid;
+	u32 number_of_registers;
+} __packed;
+
+struct guc_mmio_reg_state {
+	struct guc_mmio_regset global_reg;
+	struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
+
+	/* MMIO registers that are set as non privileged */
+	struct __packed {
+		u32 mmio_start;
+		u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
+		u32 count;
+	} mmio_white_list[GUC_MAX_ENGINES_NUM];
+} __packed;
+
+/* GuC Additional Data Struct */
+
+struct guc_ads {
+	u32 reg_state_addr;
+	u32 reg_state_buffer;
+	u32 golden_context_lrca;
+	u32 scheduler_policies;
+	u32 reserved0[3];
+	u32 eng_state_size[GUC_MAX_ENGINES_NUM];
+	u32 reserved2[4];
+} __packed;
+
 /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
 enum host2guc_action {
 	HOST2GUC_ACTION_DEFAULT = 0x0,
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 550921f..82a3c03 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -165,6 +165,13 @@
 			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
 	}
 
+	if (guc->ads_obj) {
+		u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
+				>> PAGE_SHIFT;
+		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
+		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
+	}
+
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (i915.enable_guc_submission) {
 		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
@@ -192,7 +199,7 @@
  * the value matches either of two values representing completion
  * of the GuC boot process.
  *
- * This is used for polling the GuC status in a wait_for_atomic()
+ * This is used for polling the GuC status in a wait_for()
  * loop below.
  */
 static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
@@ -252,14 +259,14 @@
 	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
 
 	/*
-	 * Spin-wait for the DMA to complete & the GuC to start up.
+	 * Wait for the DMA to complete & the GuC to start up.
 	 * NB: Docs recommend not using the interrupt for completion.
 	 * Measurements indicate this should take no more than 20ms, so a
 	 * timeout here indicates that the GuC has failed and is unusable.
 	 * (Higher levels of the driver will attempt to fall back to
 	 * execlist mode if this happens.)
 	 */
-	ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);
+	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
 
 	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
 			I915_READ(DMA_CTRL), status);
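The switch from wait_for_atomic() to wait_for() means the up-to-100 ms poll may sleep between reads instead of busy-spinning, which is fine because firmware loading runs in ordinary process context. The general shape of such a poll-with-timeout helper, sketched in portable C (the kernel's macros are more involved than this):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool device_ready(void)
{
	/* Stand-in for reading a hardware status register. */
	static int polls;
	return ++polls >= 3;
}

/* Poll cond() until it's true or timeout_ms elapses; sleep between polls. */
static int wait_for(bool (*cond)(void), int timeout_ms)
{
	struct timespec delay = { 0, 1000 * 1000 };	/* 1 ms */
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
			return -1;			/* timed out */
		nanosleep(&delay, NULL);		/* sleep, don't spin */
	}
}

int main(void)
{
	printf("wait_for: %d\n", wait_for(device_ready, 100));
	return 0;
}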
@@ -438,6 +445,7 @@
 
 	direct_interrupts_to_host(dev_priv);
 	i915_guc_submission_disable(dev);
+	i915_guc_submission_fini(dev);
 
 	return err;
 }
@@ -554,10 +562,12 @@
 	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
 		  guc_fw->guc_fw_path, err);
 
+	mutex_lock(&dev->struct_mutex);
 	obj = guc_fw->guc_fw_obj;
 	if (obj)
 		drm_gem_object_unreference(&obj->base);
 	guc_fw->guc_fw_obj = NULL;
+	mutex_unlock(&dev->struct_mutex);
 
 	release_firmware(fw);		/* OK even if fw is NULL */
 	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
@@ -624,10 +634,11 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
+	mutex_lock(&dev->struct_mutex);
 	direct_interrupts_to_host(dev_priv);
+	i915_guc_submission_disable(dev);
 	i915_guc_submission_fini(dev);
 
-	mutex_lock(&dev->struct_mutex);
 	if (guc_fw->guc_fw_obj)
 		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
 	guc_fw->guc_fw_obj = NULL;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 616108c..a0d8dae 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1210,11 +1210,19 @@
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 	enum drm_mode_status status;
 	int clock;
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
 	clock = mode->clock;
+
+	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+		clock *= 2;
+
+	if (clock > max_dotclk)
+		return MODE_CLOCK_HIGH;
+
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		clock *= 2;
 
@@ -2041,6 +2049,11 @@
 	enum port port = intel_dig_port->port;
 	uint8_t alternate_ddc_pin;
 
+	if (WARN(intel_dig_port->max_lanes < 4,
+		 "Not enough lanes (%d) for HDMI on port %c\n",
+		 intel_dig_port->max_lanes, port_name(port)))
+		return;
+
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
 	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
@@ -2224,6 +2237,7 @@
 	intel_dig_port->port = port;
 	intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
 	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
+	intel_dig_port->max_lanes = 4;
 
 	intel_hdmi_init_connector(intel_dig_port, intel_connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f1fa756..6a978ce 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -223,9 +223,11 @@
 	FAULT_AND_CONTINUE /* Unsupported */
 };
 #define GEN8_CTX_ID_SHIFT 32
-#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
+#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
+#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26
 
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static int intel_lr_context_pin(struct intel_context *ctx,
+				struct intel_engine_cs *engine);
 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *default_ctx_obj);
 
@@ -263,9 +265,76 @@
 	return 0;
 }
 
+static void
+logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+					IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+					(ring->id == VCS || ring->id == VCS2);
+
+	ring->ctx_desc_template = GEN8_CTX_VALID;
+	ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
+	if (IS_GEN8(dev))
+		ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+	ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+
+	/* TODO: WaDisableLiteRestore when we start using semaphore
+	 * signalling between Command Streamers */
+	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
+
+	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
+	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+	if (ring->disable_lite_restore_wa)
+		ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+}
+
+/**
+ * intel_lr_context_descriptor_update() - calculate & cache the descriptor
+ *					  for a pinned context
+ *
+ * @ctx: Context to work on
+ * @ring: Engine the descriptor will be used with
+ *
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB:
+ *    bits 0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
+ *    bits 12-31:    LRCA, GTT address of (the HWSP of) this context
+ *    bits 32-51:    ctx ID, a globally unique tag (the LRCA again!)
+ *    bits 52-63:    reserved, may encode the engine ID (for GuC)
+ */
+static void
+intel_lr_context_descriptor_update(struct intel_context *ctx,
+				   struct intel_engine_cs *ring)
+{
+	uint64_t lrca, desc;
+
+	lrca = ctx->engine[ring->id].lrc_vma->node.start +
+	       LRC_PPHWSP_PN * PAGE_SIZE;
+
+	desc = ring->ctx_desc_template;			   /* bits  0-11 */
+	desc |= lrca;					   /* bits 12-31 */
+	desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
+
+	ctx->engine[ring->id].lrc_desc = desc;
+}
+
+uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+				     struct intel_engine_cs *ring)
+{
+	return ctx->engine[ring->id].lrc_desc;
+}
+
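Given the bit layout documented above, building the cached descriptor is just template | LRCA | (LRCA >> 12) << 32, and extracting the 20-bit context ID is a single shift, which is exactly why caching it pays off. A standalone packing/unpacking sketch (the template and LRCA values here are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define CTX_ID_SHIFT	32
#define PAGE_SHIFT	12

/* bits 0-11: flags; bits 12-31: LRCA; bits 32-51: ctx ID (LRCA >> 12). */
static uint64_t make_descriptor(uint64_t template, uint64_t lrca)
{
	uint64_t desc = template;			/* bits  0-11 */

	desc |= lrca;					/* bits 12-31 */
	desc |= (lrca >> PAGE_SHIFT) << CTX_ID_SHIFT;	/* bits 32-51 */
	return desc;
}

int main(void)
{
	/* LRCA must be 4K aligned, so the low 12 bits are free for flags. */
	uint64_t desc = make_descriptor(0x019, 0x12345000);

	/* The 20-bit context ID can be pulled back out of the cache. */
	printf("ctx id = 0x%llx\n",
	       (unsigned long long)(desc >> CTX_ID_SHIFT));
	return 0;
}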
 /**
  * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx_obj: Logical Ring Context backing object.
+ * @ctx: Context to get the ID for
+ * @ring: Engine to get the ID for
  *
  * Do not confuse with ctx->id! Unfortunately we have a name overload
 * here: the old context ID we pass to userspace as a handle so that
@@ -273,55 +342,15 @@
  * ELSP so that the GPU can inform us of the context status via
  * interrupts.
  *
+ * The context ID is a portion of the context descriptor, so we can
+ * just extract the required part from the cached descriptor.
+ *
  * Return: 20-bits globally unique context ID.
  */
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+			   struct intel_engine_cs *ring)
 {
-	u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
-			LRC_PPHWSP_PN * PAGE_SIZE;
-
-	/* LRCA is required to be 4K aligned so the more significant 20 bits
-	 * are globally unique */
-	return lrca >> 12;
-}
-
-static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
-{
-	struct drm_device *dev = ring->dev;
-
-	return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-		IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-	       (ring->id == VCS || ring->id == VCS2);
-}
-
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
-				     struct intel_engine_cs *ring)
-{
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	uint64_t desc;
-	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
-			LRC_PPHWSP_PN * PAGE_SIZE;
-
-	WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
-
-	desc = GEN8_CTX_VALID;
-	desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-	if (IS_GEN8(ctx_obj->base.dev))
-		desc |= GEN8_CTX_L3LLC_COHERENT;
-	desc |= GEN8_CTX_PRIVILEGE;
-	desc |= lrca;
-	desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
-
-	/* TODO: WaDisableLiteRestore when we start using semaphore
-	 * signalling between Command Streamers */
-	/* desc |= GEN8_CTX_FORCE_RESTORE; */
-
-	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
-	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-	if (disable_lite_restore_wa(ring))
-		desc |= GEN8_CTX_FORCE_RESTORE;
-
-	return desc;
+	return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
 }
 
 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
@@ -363,20 +392,9 @@
 {
 	struct intel_engine_cs *ring = rq->ring;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
-	struct page *page;
-	uint32_t *reg_state;
-
-	BUG_ON(!ctx_obj);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
-	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
+	uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
 	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
 		/* True 32b PPGTT with dynamic page allocation: update PDP
@@ -390,8 +408,6 @@
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 	}
 
-	kunmap_atomic(reg_state);
-
 	return 0;
 }
 
@@ -431,9 +447,8 @@
 			/* Same ctx: ignore first request, as second request
 			 * will update tail past first request's workload */
 			cursor->elsp_submitted = req0->elsp_submitted;
-			list_del(&req0->execlist_link);
-			list_add_tail(&req0->execlist_link,
-				&ring->execlist_retired_req_list);
+			list_move_tail(&req0->execlist_link,
+				       &ring->execlist_retired_req_list);
 			req0 = cursor;
 		} else {
 			req1 = cursor;
@@ -478,16 +493,13 @@
 					    execlist_link);
 
 	if (head_req != NULL) {
-		struct drm_i915_gem_object *ctx_obj =
-				head_req->ctx->engine[ring->id].state;
-		if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+		if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
 			WARN(head_req->elsp_submitted == 0,
 			     "Never submitted head request\n");
 
 			if (--head_req->elsp_submitted <= 0) {
-				list_del(&head_req->execlist_link);
-				list_add_tail(&head_req->execlist_link,
-					&ring->execlist_retired_req_list);
+				list_move_tail(&head_req->execlist_link,
+					       &ring->execlist_retired_req_list);
 				return true;
 			}
 		}
@@ -496,6 +508,19 @@
 	return false;
 }
 
+static void get_context_status(struct intel_engine_cs *ring,
+			       u8 read_pointer,
+			       u32 *status, u32 *context_id)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
+		return;
+
+	*status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
+	*context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+}
+
 /**
  * intel_lrc_irq_handler() - handle Context Switch interrupts
  * @ring: Engine Command Streamer to handle.
@@ -516,16 +541,16 @@
 	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
 	read_pointer = ring->next_context_status_buffer;
-	write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
+	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 	if (read_pointer > write_pointer)
 		write_pointer += GEN8_CSB_ENTRIES;
 
 	spin_lock(&ring->execlist_lock);
 
 	while (read_pointer < write_pointer) {
-		read_pointer++;
-		status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
-		status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
+
+		get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
+				   &status, &status_id);
 
 		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
 			continue;
@@ -538,14 +563,14 @@
 				WARN(1, "Preemption without Lite Restore\n");
 		}
 
-		 if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
-		     (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
+		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
+		    (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
 			if (execlists_check_remove_request(ring, status_id))
 				submit_contexts++;
 		}
 	}
 
-	if (disable_lite_restore_wa(ring)) {
+	if (ring->disable_lite_restore_wa) {
 		/* Prevent a ctx to preempt itself */
 		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
 		    (submit_contexts != 0))
@@ -556,13 +581,16 @@
 
 	spin_unlock(&ring->execlist_lock);
 
-	WARN(submit_contexts > 2, "More than two context complete events?\n");
+	if (unlikely(submit_contexts > 2))
+		DRM_ERROR("More than two context complete events?\n");
+
 	ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
+	/* Update the read pointer to the old write pointer. Manual ringbuffer
+	 * management ftw </sarcasm> */
 	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
-				 ((u32)ring->next_context_status_buffer &
-				  GEN8_CSB_PTR_MASK) << 8));
+		   _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+				 ring->next_context_status_buffer << 8));
 }
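The pointer handling in intel_lrc_irq_handler() is classic circular-buffer consumption: if the hardware write pointer has wrapped below the software read pointer, extend it by the entry count, consume entries modulo the buffer size, then publish the new read pointer back to the hardware. The same logic isolated into a sketch, with the six entries GEN8 provides:

#include <stdio.h>

#define CSB_ENTRIES 6

static void consume(unsigned *read_ptr, unsigned write_ptr)
{
	unsigned rd = *read_ptr, wr = write_ptr;

	/* Unwrap: treat the write pointer as monotonically ahead. */
	if (rd > wr)
		wr += CSB_ENTRIES;

	while (rd < wr) {
		unsigned slot = ++rd % CSB_ENTRIES;

		printf("processing CSB entry %u\n", slot);
	}

	/* Publish: the read pointer catches up to the old write pointer. */
	*read_ptr = wr % CSB_ENTRIES;
}

int main(void)
{
	unsigned read_ptr = 4;

	consume(&read_ptr, 1);	/* wrapped: consumes entries 5, 0, 1 */
	printf("read_ptr now %u\n", read_ptr);
	return 0;
}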
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -571,8 +599,8 @@
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (request->ctx != ring->default_context)
-		intel_lr_context_pin(request);
+	if (request->ctx != request->i915->kernel_context)
+		intel_lr_context_pin(request->ctx, ring);
 
 	i915_gem_request_reference(request);
 
@@ -592,9 +620,8 @@
 		if (request->ctx == tail_req->ctx) {
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
-			list_del(&tail_req->execlist_link);
-			list_add_tail(&tail_req->execlist_link,
-				&ring->execlist_retired_req_list);
+			list_move_tail(&tail_req->execlist_link,
+				       &ring->execlist_retired_req_list);
 		}
 	}
 
@@ -660,17 +687,27 @@
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-	int ret;
+	int ret = 0;
 
 	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
 
-	if (request->ctx != request->ring->default_context) {
-		ret = intel_lr_context_pin(request);
+	if (i915.enable_guc_submission) {
+		/*
+		 * Check that the GuC has space for the request before
+		 * going any further, as the i915_add_request() call
+		 * later on mustn't fail ...
+		 */
+		struct intel_guc *guc = &request->i915->guc;
+
+		ret = i915_guc_wq_check_space(guc->execbuf_client);
 		if (ret)
 			return ret;
 	}
 
-	return 0;
+	if (request->ctx != request->i915->kernel_context)
+		ret = intel_lr_context_pin(request->ctx, request->ring);
+
+	return ret;
 }
 
 static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
@@ -724,23 +761,46 @@
  * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
-static void
+static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = request->ring;
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct drm_i915_private *dev_priv = request->i915;
+	struct intel_engine_cs *engine = request->ring;
 
-	intel_logical_ring_advance(request->ringbuf);
+	intel_logical_ring_advance(ringbuf);
+	request->tail = ringbuf->tail;
 
-	request->tail = request->ringbuf->tail;
+	/*
+	 * Here we add two extra NOOPs as padding to avoid
+	 * lite restore of a context with HEAD==TAIL.
+	 *
+	 * Caller must reserve WA_TAIL_DWORDS for us!
+	 */
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
 
-	if (intel_ring_stopped(ring))
-		return;
+	if (intel_ring_stopped(engine))
+		return 0;
+
+	if (engine->last_context != request->ctx) {
+		if (engine->last_context)
+			intel_lr_context_unpin(engine->last_context, engine);
+		if (request->ctx != request->i915->kernel_context) {
+			intel_lr_context_pin(request->ctx, engine);
+			engine->last_context = request->ctx;
+		} else {
+			engine->last_context = NULL;
+		}
+	}
 
 	if (dev_priv->guc.execbuf_client)
 		i915_guc_submit(dev_priv->guc.execbuf_client, request);
 	else
 		execlists_context_queue(request);
+
+	return 0;
 }
 
 static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
@@ -967,8 +1027,9 @@
 		struct drm_i915_gem_object *ctx_obj =
 				ctx->engine[ring->id].state;
 
-		if (ctx_obj && (ctx != ring->default_context))
-			intel_lr_context_unpin(req);
+		if (ctx_obj && (ctx != req->i915->kernel_context))
+			intel_lr_context_unpin(ctx, ring);
+
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
@@ -1012,24 +1073,39 @@
 	return 0;
 }
 
-static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
-		struct drm_i915_gem_object *ctx_obj,
-		struct intel_ringbuffer *ringbuf)
+static int intel_lr_context_do_pin(struct intel_context *ctx,
+				   struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct page *lrc_state_page;
+	uint32_t *lrc_reg_state;
+	int ret;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
 	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
 			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
 		return ret;
 
+	lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+	if (WARN_ON(!lrc_state_page)) {
+		ret = -ENODEV;
+		goto unpin_ctx_obj;
+	}
+
 	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 	if (ret)
 		goto unpin_ctx_obj;
 
+	ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+	intel_lr_context_descriptor_update(ctx, ring);
+	lrc_reg_state = kmap(lrc_state_page);
+	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
+	ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
 	ctx_obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
@@ -1044,37 +1120,40 @@
 	return ret;
 }
 
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+static int intel_lr_context_pin(struct intel_context *ctx,
+				struct intel_engine_cs *engine)
 {
 	int ret = 0;
-	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
-	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-		ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+	if (ctx->engine[engine->id].pin_count++ == 0) {
+		ret = intel_lr_context_do_pin(ctx, engine);
 		if (ret)
 			goto reset_pin_count;
+
+		i915_gem_context_reference(ctx);
 	}
 	return ret;
 
 reset_pin_count:
-	rq->ctx->engine[ring->id].pin_count = 0;
+	ctx->engine[engine->id].pin_count = 0;
 	return ret;
 }
 
-void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
+void intel_lr_context_unpin(struct intel_context *ctx,
+			    struct intel_engine_cs *engine)
 {
-	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 
-	if (ctx_obj) {
-		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--rq->ctx->engine[ring->id].pin_count == 0) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-		}
+	WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
+	if (--ctx->engine[engine->id].pin_count == 0) {
+		kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+		intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
+		i915_gem_object_ggtt_unpin(ctx_obj);
+		ctx->engine[engine->id].lrc_vma = NULL;
+		ctx->engine[engine->id].lrc_desc = 0;
+		ctx->engine[engine->id].lrc_reg_state = NULL;
+
+		i915_gem_context_unreference(ctx);
 	}
 }
 
@@ -1087,7 +1166,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
-	if (WARN_ON_ONCE(w->count == 0))
+	if (w->count == 0)
 		return 0;
 
 	ring->gpu_caches_dirty = true;
@@ -1474,7 +1553,7 @@
 	u8 next_context_status_buffer_hw;
 
 	lrc_setup_hardware_status_page(ring,
-				ring->default_context->engine[ring->id].state);
+				dev_priv->kernel_context->engine[ring->id].state);
 
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1493,9 +1572,11 @@
 	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
 	 * BDW  | CSB regs not reset       | CSB regs reset       |
 	 * CHT  | CSB regs not reset       | CSB regs not reset   |
+	 * SKL  |         ?                |         ?            |
+	 * BXT  |         ?                |         ?            |
 	 */
-	next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
-						   & GEN8_CSB_PTR_MASK);
+	next_context_status_buffer_hw =
+		GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
 
 	/*
 	 * When the CSB registers are reset (also after power-up / gpu reset),
@@ -1698,7 +1779,7 @@
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-	bool vf_flush_wa;
+	bool vf_flush_wa = false;
 	u32 flags = 0;
 	int ret;
 
@@ -1720,14 +1801,14 @@
 		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_QW_WRITE;
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-	}
 
-	/*
-	 * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
-	 * control.
-	 */
-	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
-		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		/*
+		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+		 * pipe control.
+		 */
+		if (IS_GEN9(ring->dev))
+			vf_flush_wa = true;
+	}
 
 	ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
 	if (ret)
@@ -1791,44 +1872,65 @@
 	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+#define WA_TAIL_DWORDS 2
+
+static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
+{
+	return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
+
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *ring = ringbuf->ring;
-	u32 cmd;
 	int ret;
 
-	/*
-	 * Reserve space for 2 NOOPs at the end of each request to be
-	 * used as a workaround for not being allowed to do lite
-	 * restore with HEAD==TAIL (WaIdleLiteRestore).
-	 */
-	ret = intel_logical_ring_begin(request, 8);
+	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
 	if (ret)
 		return ret;
 
-	cmd = MI_STORE_DWORD_IMM_GEN4;
-	cmd |= MI_GLOBAL_GTT;
+	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	intel_logical_ring_emit(ringbuf, cmd);
 	intel_logical_ring_emit(ringbuf,
-				(ring->status_page.gfx_addr +
-				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+	intel_logical_ring_emit(ringbuf,
+				hws_seqno_address(request->ring) |
+				MI_FLUSH_DW_USE_GTT);
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance_and_submit(request);
+	return intel_logical_ring_advance_and_submit(request);
+}
 
-	/*
-	 * Here we add two extra NOOPs as padding to avoid
-	 * lite restore of a context with HEAD==TAIL.
+static int gen8_emit_request_render(struct drm_i915_gem_request *request)
+{
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	int ret;
+
+	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+	if (ret)
+		return ret;
+
+	/* w/a: for post-sync ops following a GPGPU operation we
+	 * need a prior CS_STALL, which is emitted by the flush
+	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
-
-	return 0;
+	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+	intel_logical_ring_emit(ringbuf,
+				(PIPE_CONTROL_GLOBAL_GTT_IVB |
+				 PIPE_CONTROL_CS_STALL |
+				 PIPE_CONTROL_QW_WRITE));
+	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+	intel_logical_ring_emit(ringbuf, 0);
+	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+	return intel_logical_ring_advance_and_submit(request);
 }
 
 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
@@ -1911,12 +2013,44 @@
 		ring->status_page.obj = NULL;
 	}
 
+	ring->disable_lite_restore_wa = false;
+	ring->ctx_desc_template = 0;
+
 	lrc_destroy_wa_ctx_obj(ring);
 	ring->dev = NULL;
 }
 
-static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+static void
+logical_ring_default_vfuncs(struct drm_device *dev,
+			    struct intel_engine_cs *ring)
 {
+	/* Default vfuncs which can be overriden by each engine. */
+	ring->init_hw = gen8_init_common_ring;
+	ring->emit_request = gen8_emit_request;
+	ring->emit_flush = gen8_emit_flush;
+	ring->irq_get = gen8_logical_ring_get_irq;
+	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->emit_bb_start = gen8_emit_bb_start;
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+		ring->get_seqno = bxt_a_get_seqno;
+		ring->set_seqno = bxt_a_set_seqno;
+	} else {
+		ring->get_seqno = gen8_get_seqno;
+		ring->set_seqno = gen8_set_seqno;
+	}
+}
+
+static inline void
+logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+{
+	ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+	ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+}
+
+static int
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+{
+	struct intel_context *dctx = to_i915(dev)->kernel_context;
 	int ret;
 
 	/* Intentionally left blank. */
@@ -1933,19 +2067,18 @@
 	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
 	spin_lock_init(&ring->execlist_lock);
 
+	logical_ring_init_platform_invariants(ring);
+
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
 		goto error;
 
-	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+	ret = intel_lr_context_deferred_alloc(dctx, ring);
 	if (ret)
 		goto error;
 
 	/* As this is the default context, always pin it */
-	ret = intel_lr_context_do_pin(
-			ring,
-			ring->default_context->engine[ring->id].state,
-			ring->default_context->engine[ring->id].ringbuf);
+	ret = intel_lr_context_do_pin(dctx, ring);
 	if (ret) {
 		DRM_ERROR(
 			"Failed to pin and map ringbuffer %s: %d\n",
@@ -1968,32 +2101,25 @@
 
 	ring->name = "render ring";
 	ring->id = RCS;
+	ring->exec_id = I915_EXEC_RENDER;
+	ring->guc_id = GUC_RENDER_ENGINE;
 	ring->mmio_base = RENDER_RING_BASE;
-	ring->irq_enable_mask =
-		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
-	ring->irq_keep_mask =
-		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+
+	logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
 	if (HAS_L3_DPF(dev))
 		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
+	logical_ring_default_vfuncs(dev, ring);
+
+	/* Override some for render ring. */
 	if (INTEL_INFO(dev)->gen >= 9)
 		ring->init_hw = gen9_init_render_ring;
 	else
 		ring->init_hw = gen8_init_render_ring;
 	ring->init_context = gen8_init_rcs_context;
 	ring->cleanup = intel_fini_pipe_control;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
-	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
-	}
-	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush_render;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	ring->emit_request = gen8_emit_request_render;
 
 	ring->dev = dev;
 
@@ -2027,25 +2153,12 @@
 
 	ring->name = "bsd ring";
 	ring->id = VCS;
+	ring->exec_id = I915_EXEC_BSD;
+	ring->guc_id = GUC_VIDEO_ENGINE;
 	ring->mmio_base = GEN6_BSD_RING_BASE;
-	ring->irq_enable_mask =
-		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-	ring->irq_keep_mask =
-		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
-	ring->init_hw = gen8_init_common_ring;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
-	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
-	}
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, ring);
 
 	return logical_ring_init(dev, ring);
 }
@@ -2055,22 +2168,14 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
 
-	ring->name = "bds2 ring";
+	ring->name = "bsd2 ring";
 	ring->id = VCS2;
+	ring->exec_id = I915_EXEC_BSD;
+	ring->guc_id = GUC_VIDEO_ENGINE2;
 	ring->mmio_base = GEN8_BSD2_RING_BASE;
-	ring->irq_enable_mask =
-		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-	ring->irq_keep_mask =
-		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 
-	ring->init_hw = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, ring);
 
 	return logical_ring_init(dev, ring);
 }
@@ -2082,25 +2187,12 @@
 
 	ring->name = "blitter ring";
 	ring->id = BCS;
+	ring->exec_id = I915_EXEC_BLT;
+	ring->guc_id = GUC_BLITTER_ENGINE;
 	ring->mmio_base = BLT_RING_BASE;
-	ring->irq_enable_mask =
-		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-	ring->irq_keep_mask =
-		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
-	ring->init_hw = gen8_init_common_ring;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
-	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
-	}
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, ring);
 
 	return logical_ring_init(dev, ring);
 }
@@ -2112,25 +2204,12 @@
 
 	ring->name = "video enhancement ring";
 	ring->id = VECS;
+	ring->exec_id = I915_EXEC_VEBOX;
+	ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
 	ring->mmio_base = VEBOX_RING_BASE;
-	ring->irq_enable_mask =
-		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-	ring->irq_keep_mask =
-		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
-	ring->init_hw = gen8_init_common_ring;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
-	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
-	}
-	ring->emit_request = gen8_emit_request;
-	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
-	ring->emit_bb_start = gen8_emit_bb_start;
+	logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
+	logical_ring_default_vfuncs(dev, ring);
 
 	return logical_ring_init(dev, ring);
 }
@@ -2235,6 +2314,27 @@
 	return rpcs;
 }
 
+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+{
+	u32 indirect_ctx_offset;
+
+	switch (INTEL_INFO(ring->dev)->gen) {
+	default:
+		MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+		/* fall through */
+	case 9:
+		indirect_ctx_offset =
+			GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+		break;
+	case 8:
+		indirect_ctx_offset =
+			GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+		break;
+	}
+
+	return indirect_ctx_offset;
+}
+
 static int
 populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
 		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
@@ -2278,7 +2378,8 @@
 	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
 		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
 					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-					  CTX_CTRL_RS_CTX_ENABLE));
+					  (HAS_RESOURCE_STREAMER(dev) ?
+					    CTX_CTRL_RS_CTX_ENABLE : 0)));
 	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
 	/* Ring buffer start address is not known until the buffer is pinned.
@@ -2307,7 +2408,7 @@
 				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
 
 			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
-				CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+				intel_lr_indirect_ctx_offset(ring) << 6;
 
 			reg_state[CTX_BB_PER_CTX_PTR+1] =
 				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2368,26 +2469,39 @@
 {
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = I915_NUM_RINGS; --i >= 0; ) {
+		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
-		if (ctx_obj) {
-			struct intel_ringbuffer *ringbuf =
-					ctx->engine[i].ringbuf;
-			struct intel_engine_cs *ring = ringbuf->ring;
+		if (!ctx_obj)
+			continue;
 
-			if (ctx == ring->default_context) {
-				intel_unpin_ringbuffer_obj(ringbuf);
-				i915_gem_object_ggtt_unpin(ctx_obj);
-			}
-			WARN_ON(ctx->engine[ring->id].pin_count);
-			intel_ringbuffer_free(ringbuf);
-			drm_gem_object_unreference(&ctx_obj->base);
+		if (ctx == ctx->i915->kernel_context) {
+			intel_unpin_ringbuffer_obj(ringbuf);
+			i915_gem_object_ggtt_unpin(ctx_obj);
 		}
+
+		WARN_ON(ctx->engine[i].pin_count);
+		intel_ringbuffer_free(ringbuf);
+		drm_gem_object_unreference(&ctx_obj->base);
 	}
 }
 
-static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
+/**
+ * intel_lr_context_size() - return the size of the context for an engine
+ * @ring: which engine to find the context size for
+ *
+ * Each engine may require a different amount of space for a context image,
+ * so when allocating (or copying) an image, this function can be used to
+ * find the right size for the specific engine.
+ *
+ * Return: size (in bytes) of an engine-specific context image
+ *
+ * Note: this size includes the HWSP, which is part of the context image
+ * in LRC mode, but does not include the "shared data page" used with
+ * GuC submission. The caller should account for this if using the GuC.
+ */
+uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
 {
 	int ret = 0;
 
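The GuC caveat in the kernel-doc above is handled by the deferred-alloc path
further down; condensed from the hunk that follows (a restatement for clarity,
not extra patch content):

	context_size = round_up(intel_lr_context_size(ring), 4096);
	/* one extra page shared between the driver and the GuC */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
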
@@ -2444,7 +2558,7 @@
  */
 
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-				     struct intel_engine_cs *ring)
+				    struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_gem_object *ctx_obj;
@@ -2455,7 +2569,7 @@
 	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
 	WARN_ON(ctx->engine[ring->id].state);
 
-	context_size = round_up(get_lr_context_size(ring), 4096);
+	context_size = round_up(intel_lr_context_size(ring), 4096);
 
 	/* One extra page for the data shared between the driver and the GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2481,14 +2595,13 @@
 	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
-	if (ctx != ring->default_context && ring->init_context) {
+	if (ctx != ctx->i915->kernel_context && ring->init_context) {
 		struct drm_i915_gem_request *req;
 
-		ret = i915_gem_request_alloc(ring,
-			ctx, &req);
-		if (ret) {
-			DRM_ERROR("ring create req: %d\n",
-				ret);
+		req = i915_gem_request_alloc(ring, ctx);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			DRM_ERROR("ring create req: %d\n", ret);
 			goto error_ringbuf;
 		}
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 0b821b9..e6cda3e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,8 +25,6 @@
 #define _INTEL_LRC_H_
 
 #define GEN8_LR_CONTEXT_ALIGN 4096
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
 #define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
@@ -40,6 +38,22 @@
 #define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
 #define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)
 
+/* The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+#define GEN8_CSB_WRITE_PTR(csb_status) \
+	(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
+#define GEN8_CSB_READ_PTR(csb_status) \
+	(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
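
The comment above the new CSB macros implies that consumers must wrap at
GEN8_CSB_ENTRIES rather than at the full 3-bit range. A minimal sketch of such
a consumer, assuming the macros above (csb_entries_pending() is a hypothetical
helper, not part of the patch):

	static inline unsigned int csb_entries_pending(u32 csb_status, u32 read_ptr)
	{
		u32 write_ptr = GEN8_CSB_WRITE_PTR(csb_status);

		/* The write pointer wraps to 0 after entry 5, so it can
		 * numerically trail the read pointer while entries are pending.
		 */
		if (read_ptr > write_ptr)
			write_ptr += GEN8_CSB_ENTRIES;

		return write_ptr - read_ptr;
	}
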
@@ -84,21 +98,25 @@
 #define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
 
 void intel_lr_context_free(struct intel_context *ctx);
+uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				    struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct drm_i915_gem_request *req);
+void intel_lr_context_unpin(struct intel_context *ctx,
+			    struct intel_engine_cs *engine);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
 
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+			   struct intel_engine_cs *ring);
+
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas);
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
 void intel_execlists_retire_requests(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bc04d8d..30a8403 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -31,6 +31,7 @@
 #include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
@@ -1088,7 +1089,12 @@
 	 * preferred mode is the right one.
 	 */
 	mutex_lock(&dev->mode_config.mutex);
-	edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+	if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
+		edid = drm_get_edid_switcheroo(connector,
+				    intel_gmbus_get_adapter(dev_priv, pin));
+	else
+		edid = drm_get_edid(connector,
+				    intel_gmbus_get_adapter(dev_priv, pin));
 	if (edid) {
 		if (drm_add_edid_modes(connector, edid)) {
 			drm_mode_connector_update_edid_property(connector,
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 76f1980..9168413 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -240,9 +240,9 @@
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-	if (ret)
-		return ret;
+	req = i915_gem_request_alloc(ring, NULL);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 4);
 	if (ret) {
@@ -283,9 +283,9 @@
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-	if (ret)
-		return ret;
+	req = i915_gem_request_alloc(ring, NULL);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 2);
 	if (ret) {
@@ -349,9 +349,9 @@
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-	if (ret)
-		return ret;
+	req = i915_gem_request_alloc(ring, NULL);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 6);
 	if (ret) {
@@ -423,9 +423,9 @@
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
 
-		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-		if (ret)
-			return ret;
+		req = i915_gem_request_alloc(ring, NULL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
 
 		ret = intel_ring_begin(req, 2);
 		if (ret) {
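
All four overlay hunks above apply the same API change: i915_gem_request_alloc()
now returns the request itself, with failures encoded via ERR_PTR(), instead of
an int plus an out parameter, and passing a NULL context appears to select the
kernel (default) context. A minimal sketch of the new calling convention, under
those assumptions:

	struct drm_i915_gem_request *req;

	req = i915_gem_request_alloc(ring, NULL);	/* NULL: kernel context */
	if (IS_ERR(req))
		return PTR_ERR(req);	/* recover the errno from the pointer */
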
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b28c29f..347d4df 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,8 @@
 #include <linux/module.h>
 
 /**
+ * DOC: RC6
+ *
  * RC6 is a special power stage which allows the GPU to enter an very
  * low-voltage mode when idle, using down to 0V while at this stage.  This
  * stage is entered automatically when the GPU is idle when RC6 support is
@@ -546,7 +548,7 @@
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
  * @wm: chip FIFO params
- * @pixel_size: display pixel size
+ * @cpp: bytes per pixel
  * @latency_ns: memory latency for the platform
  *
  * Calculate the watermark level (the level at which the display plane will
@@ -562,8 +564,7 @@
  */
 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
 					const struct intel_watermark_params *wm,
-					int fifo_size,
-					int pixel_size,
+					int fifo_size, int cpp,
 					unsigned long latency_ns)
 {
 	long entries_required, wm_size;
@@ -574,7 +575,7 @@
 	 * clocks go from a few thousand to several hundred thousand.
 	 * latency is usually a few thousand
 	 */
-	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
 		1000;
 	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
 
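To make the units concrete, a worked example with illustrative numbers (not
taken from the patch): clock_in_khz = 148500, cpp = 4, latency_ns = 5000 and a
64-byte cacheline give

	entries_required = ((148500 / 1000) * 4 * 5000) / 1000 = 2960 bytes
	DIV_ROUND_UP(2960, 64) = 47 cachelines

i.e. the FIFO must buffer 47 cachelines to ride out that latency at that pixel
rate; the watermark is then derived from the FIFO size less these entries.
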
@@ -638,13 +639,13 @@
 	crtc = single_enabled_crtc(dev);
 	if (crtc) {
 		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
-		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 		int clock = adjusted_mode->crtc_clock;
 
 		/* Display SR */
 		wm = intel_calculate_wm(clock, &pineview_display_wm,
 					pineview_display_wm.fifo_size,
-					pixel_size, latency->display_sr);
+					cpp, latency->display_sr);
 		reg = I915_READ(DSPFW1);
 		reg &= ~DSPFW_SR_MASK;
 		reg |= FW_WM(wm, SR);
@@ -654,7 +655,7 @@
 		/* cursor SR */
 		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
 					pineview_display_wm.fifo_size,
-					pixel_size, latency->cursor_sr);
+					cpp, latency->cursor_sr);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_CURSOR_SR_MASK;
 		reg |= FW_WM(wm, CURSOR_SR);
@@ -663,7 +664,7 @@
 		/* Display HPLL off SR */
 		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
 					pineview_display_hplloff_wm.fifo_size,
-					pixel_size, latency->display_hpll_disable);
+					cpp, latency->display_hpll_disable);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_HPLL_SR_MASK;
 		reg |= FW_WM(wm, HPLL_SR);
@@ -672,7 +673,7 @@
 		/* cursor HPLL off SR */
 		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
 					pineview_display_hplloff_wm.fifo_size,
-					pixel_size, latency->cursor_hpll_disable);
+					cpp, latency->cursor_hpll_disable);
 		reg = I915_READ(DSPFW3);
 		reg &= ~DSPFW_HPLL_CURSOR_MASK;
 		reg |= FW_WM(wm, HPLL_CURSOR);
@@ -696,7 +697,7 @@
 {
 	struct drm_crtc *crtc;
 	const struct drm_display_mode *adjusted_mode;
-	int htotal, hdisplay, clock, pixel_size;
+	int htotal, hdisplay, clock, cpp;
 	int line_time_us, line_count;
 	int entries, tlb_miss;
 
@@ -711,10 +712,10 @@
 	clock = adjusted_mode->crtc_clock;
 	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 
 	/* Use the small buffer method to calculate plane watermark */
-	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
 	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
 	if (tlb_miss > 0)
 		entries += tlb_miss;
@@ -726,7 +727,7 @@
 	/* Use the large buffer method to calculate cursor watermark */
 	line_time_us = max(htotal * 1000 / clock, 1);
 	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
-	entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
+	entries = line_count * crtc->cursor->state->crtc_w * cpp;
 	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
 	if (tlb_miss > 0)
 		entries += tlb_miss;
@@ -782,7 +783,7 @@
 {
 	struct drm_crtc *crtc;
 	const struct drm_display_mode *adjusted_mode;
-	int hdisplay, htotal, pixel_size, clock;
+	int hdisplay, htotal, cpp, clock;
 	unsigned long line_time_us;
 	int line_count, line_size;
 	int small, large;
@@ -798,21 +799,21 @@
 	clock = adjusted_mode->crtc_clock;
 	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 
 	line_time_us = max(htotal * 1000 / clock, 1);
 	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = hdisplay * pixel_size;
+	line_size = hdisplay * cpp;
 
 	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	small = ((clock * cpp / 1000) * latency_ns) / 1000;
 	large = line_count * line_size;
 
 	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
 	*display_wm = entries + display->guard_size;
 
 	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
+	entries = line_count * cpp * crtc->cursor->state->crtc_w;
 	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
 	*cursor_wm = entries + cursor->guard_size;
 
@@ -904,13 +905,13 @@
 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 				   unsigned int pipe_htotal,
 				   unsigned int horiz_pixels,
-				   unsigned int bytes_per_pixel,
+				   unsigned int cpp,
 				   unsigned int latency)
 {
 	unsigned int ret;
 
 	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
-	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = (ret + 1) * horiz_pixels * cpp;
 	ret = DIV_ROUND_UP(ret, 64);
 
 	return ret;
@@ -939,7 +940,7 @@
 				     int level)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	int clock, htotal, pixel_size, width, wm;
+	int clock, htotal, cpp, width, wm;
 
 	if (dev_priv->wm.pri_latency[level] == 0)
 		return USHRT_MAX;
@@ -947,7 +948,7 @@
 	if (!state->visible)
 		return 0;
 
-	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
 	clock = crtc->config->base.adjusted_mode.crtc_clock;
 	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
 	width = crtc->config->pipe_src_w;
@@ -963,7 +964,7 @@
 		 */
 		wm = 63;
 	} else {
-		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+		wm = vlv_wm_method2(clock, htotal, width, cpp,
 				    dev_priv->wm.pri_latency[level] * 10);
 	}
 
@@ -1437,7 +1438,7 @@
 		int clock = adjusted_mode->crtc_clock;
 		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 		unsigned long line_time_us;
 		int entries;
 
@@ -1445,7 +1446,7 @@
 
 		/* Use ns/us then divide to preserve precision */
 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * hdisplay;
+			cpp * hdisplay;
 		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
 		srwm = I965_FIFO_SIZE - entries;
 		if (srwm < 0)
@@ -1455,7 +1456,7 @@
 			      entries, srwm);
 
 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * crtc->cursor->state->crtc_w;
+			cpp * crtc->cursor->state->crtc_w;
 		entries = DIV_ROUND_UP(entries,
 					  i965_cursor_wm_info.cacheline_size);
 		cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1516,7 +1517,7 @@
 	crtc = intel_get_crtc_for_plane(dev, 0);
 	if (intel_crtc_active(crtc)) {
 		const struct drm_display_mode *adjusted_mode;
-		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 		if (IS_GEN2(dev))
 			cpp = 4;
 
@@ -1538,7 +1539,7 @@
 	crtc = intel_get_crtc_for_plane(dev, 1);
 	if (intel_crtc_active(crtc)) {
 		const struct drm_display_mode *adjusted_mode;
-		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 		if (IS_GEN2(dev))
 			cpp = 4;
 
@@ -1584,7 +1585,7 @@
 		int clock = adjusted_mode->crtc_clock;
 		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
-		int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
+		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
 		unsigned long line_time_us;
 		int entries;
 
@@ -1592,7 +1593,7 @@
 
 		/* Use ns/us then divide to preserve precision */
 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-			pixel_size * hdisplay;
+			cpp * hdisplay;
 		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
 		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
 		srwm = wm_info->fifo_size - entries;
@@ -1672,6 +1673,9 @@
 		if (pipe_h < pfit_h)
 			pipe_h = pfit_h;
 
+		if (WARN_ON(!pfit_w || !pfit_h))
+			return pixel_rate;
+
 		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
 				     pfit_w * pfit_h);
 	}
@@ -1680,15 +1684,14 @@
 }
 
 /* latency must be in 0.1us units. */
-static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
-			       uint32_t latency)
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
 {
 	uint64_t ret;
 
 	if (WARN(latency == 0, "Latency value missing\n"))
 		return UINT_MAX;
 
-	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
+	ret = (uint64_t) pixel_rate * cpp * latency;
 	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
 
 	return ret;
@@ -1696,24 +1699,37 @@
 
 /* latency must be in 0.1us units. */
 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+			       uint32_t horiz_pixels, uint8_t cpp,
 			       uint32_t latency)
 {
 	uint32_t ret;
 
 	if (WARN(latency == 0, "Latency value missing\n"))
 		return UINT_MAX;
+	if (WARN_ON(!pipe_htotal))
+		return UINT_MAX;
 
 	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
-	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = (ret + 1) * horiz_pixels * cpp;
 	ret = DIV_ROUND_UP(ret, 64) + 2;
 	return ret;
 }
 
 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
-			   uint8_t bytes_per_pixel)
+			   uint8_t cpp)
 {
-	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
+	/*
+	 * Neither of these should be possible since this function shouldn't be
+	 * called if the CRTC is off or the plane is invisible.  But let's be
+	 * extra paranoid to avoid a potential divide-by-zero if we screw up
+	 * elsewhere in the driver.
+	 */
+	if (WARN_ON(!cpp))
+		return 0;
+	if (WARN_ON(!horiz_pixels))
+		return 0;
+
+	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
 }
 
 struct ilk_wm_maximums {
@@ -1732,13 +1748,14 @@
 				   uint32_t mem_value,
 				   bool is_lp)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	int cpp = pstate->base.fb ?
+		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 	uint32_t method1, method2;
 
 	if (!cstate->base.active || !pstate->visible)
 		return 0;
 
-	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
 
 	if (!is_lp)
 		return method1;
@@ -1746,8 +1763,7 @@
 	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
 				 drm_rect_width(&pstate->dst),
-				 bpp,
-				 mem_value);
+				 cpp, mem_value);
 
 	return min(method1, method2);
 }
@@ -1760,18 +1776,18 @@
 				   const struct intel_plane_state *pstate,
 				   uint32_t mem_value)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	int cpp = pstate->base.fb ?
+		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 	uint32_t method1, method2;
 
 	if (!cstate->base.active || !pstate->visible)
 		return 0;
 
-	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
 	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
 				 drm_rect_width(&pstate->dst),
-				 bpp,
-				 mem_value);
+				 cpp, mem_value);
 	return min(method1, method2);
 }
 
@@ -1804,12 +1820,13 @@
 				   const struct intel_plane_state *pstate,
 				   uint32_t pri_val)
 {
-	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+	int cpp = pstate->base.fb ?
+		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 
 	if (!cstate->base.active || !pstate->visible)
 		return 0;
 
-	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
+	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
 }
 
 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@@ -2002,14 +2019,19 @@
 }
 
 static uint32_t
-hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
+hsw_compute_linetime_wm(struct drm_device *dev,
+			struct intel_crtc_state *cstate)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode =
+		&cstate->base.adjusted_mode;
 	u32 linetime, ips_linetime;
 
-	if (!intel_crtc->active)
+	if (!cstate->base.active)
+		return 0;
+	if (WARN_ON(adjusted_mode->crtc_clock == 0))
+		return 0;
+	if (WARN_ON(dev_priv->cdclk_freq == 0))
 		return 0;
 
 	/* The WM are computed based on how long it takes to fill a single
@@ -2281,6 +2303,7 @@
 		return PTR_ERR(cstate);
 
 	pipe_wm = &cstate->wm.optimal.ilk;
+	memset(pipe_wm, 0, sizeof(*pipe_wm));
 
 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
 		ps = drm_atomic_get_plane_state(state,
@@ -2317,8 +2340,7 @@
 			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		pipe_wm->linetime = hsw_compute_linetime_wm(dev,
-							    &intel_crtc->base);
+		pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
 
 	/* LP0 watermarks always use 1/2 DDB partitioning */
 	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
@@ -3028,26 +3050,25 @@
 
 /*
  * The max latency should be 257 (max the punit can code is 255 and we add 2us
- * for the read latency) and bytes_per_pixel should always be <= 8, so that
+ * for the read latency) and cpp should always be <= 8, so that
  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
-static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
-			       uint32_t latency)
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
 {
 	uint32_t wm_intermediate_val, ret;
 
 	if (latency == 0)
 		return UINT_MAX;
 
-	wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
+	wm_intermediate_val = latency * pixel_rate * cpp / 512;
 	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
 
 	return ret;
 }
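
The bound claimed in the comment above checks out: with the stated maxima of
latency = 257 and cpp = 8, and pixel_rate at the ~2 GHz ceiling (2,000,000 in
kHz units),

	257 * 2,000,000 * 8 = 4,112,000,000 < 4,294,967,295 (UINT32_MAX)

so the intermediate product in skl_wm_method1() cannot overflow its 32-bit
type before the division by 512.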
 
 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+			       uint32_t horiz_pixels, uint8_t cpp,
 			       uint64_t tiling, uint32_t latency)
 {
 	uint32_t ret;
@@ -3057,7 +3078,7 @@
 	if (latency == 0)
 		return UINT_MAX;
 
-	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+	plane_bytes_per_line = horiz_pixels * cpp;
 
 	if (tiling == I915_FORMAT_MOD_Y_TILED ||
 	    tiling == I915_FORMAT_MOD_Yf_TILED) {
@@ -3107,23 +3128,21 @@
 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t res_blocks, res_lines;
 	uint32_t selected_result;
-	uint8_t bytes_per_pixel;
+	uint8_t cpp;
 
 	if (latency == 0 || !cstate->base.active || !fb)
 		return false;
 
-	bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
+	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
-				 bytes_per_pixel,
-				 latency);
+				 cpp, latency);
 	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
 				 cstate->pipe_src_w,
-				 bytes_per_pixel,
-				 fb->modifier[0],
+				 cpp, fb->modifier[0],
 				 latency);
 
-	plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
+	plane_bytes_per_line = cstate->pipe_src_w * cpp;
 	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -3131,11 +3150,11 @@
 		uint32_t min_scanlines = 4;
 		uint32_t y_tile_minimum;
 		if (intel_rotation_90_or_270(plane->state->rotation)) {
-			int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
 				drm_format_plane_cpp(fb->pixel_format, 1) :
 				drm_format_plane_cpp(fb->pixel_format, 0);
 
-			switch (bpp) {
+			switch (cpp) {
 			case 1:
 				min_scanlines = 16;
 				break;
@@ -3606,23 +3625,45 @@
 	dev_priv->wm.skl_hw = *results;
 }
 
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_compute_wm_config(struct drm_device *dev,
+				  struct intel_wm_config *config)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *crtc;
+
+	/* Compute the currently _active_ config */
+	for_each_intel_crtc(dev, crtc) {
+		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
+
+		if (!wm->pipe_enabled)
+			continue;
+
+		config->sprites_enabled |= wm->sprites_enabled;
+		config->sprites_scaled |= wm->sprites_scaled;
+		config->num_pipes_active++;
+	}
+}
+
+static void ilk_program_watermarks(struct intel_crtc_state *cstate)
+{
+	struct drm_crtc *crtc = cstate->base.crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct ilk_wm_maximums max;
-	struct intel_wm_config *config = &dev_priv->wm.config;
+	struct intel_wm_config config = {};
 	struct ilk_wm_values results = {};
 	enum intel_ddb_partitioning partitioning;
 
-	ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
-	ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
+	ilk_compute_wm_config(dev, &config);
+
+	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
 
 	/* 5/6 split only in single pipe config on IVB+ */
 	if (INTEL_INFO(dev)->gen >= 7 &&
-	    config->num_pipes_active == 1 && config->sprites_enabled) {
-		ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
-		ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
+	    config.num_pipes_active == 1 && config.sprites_enabled) {
+		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
 
 		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
 	} else {
@@ -3639,7 +3680,6 @@
 
 static void ilk_update_wm(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
@@ -3659,7 +3699,7 @@
 
 	intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
 
-	ilk_program_watermarks(dev_priv);
+	ilk_program_watermarks(cstate);
 }
 
 static void skl_pipe_wm_active_state(uint32_t val,
@@ -4045,7 +4085,7 @@
 		dev_priv->display.update_wm(crtc);
 }
 
-/**
+/*
  * Lock protecting IPS related data structures
  */
 DEFINE_SPINLOCK(mchdev_lock);
@@ -4081,11 +4121,13 @@
 static void ironlake_enable_drps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u32 rgvmodectl;
 	u8 fmax, fmin, fstart, vstart;
 
 	spin_lock_irq(&mchdev_lock);
 
+	rgvmodectl = I915_READ(MEMMODECTL);
+
 	/* Enable temp reporting */
 	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
 	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -4518,21 +4560,71 @@
 	}
 	if (HAS_RC6p(dev))
 		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
-			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+			      onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+			      onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
 
 	else
 		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
-			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
 }
 
-static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable_rc6 = true;
+	unsigned long rc6_ctx_base;
+
+	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+		DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+		enable_rc6 = false;
+	}
+
+	/*
+	 * The exact context size is not known for BXT, so assume a page size
+	 * for this check.
+	 */
+	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+	if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
+	      (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
+					dev_priv->gtt.stolen_reserved_size))) {
+		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+		enable_rc6 = false;
+	}
+
+	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
+	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
+	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
+	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
+		DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
+					    GEN6_RC_CTL_HW_ENABLE)) &&
+	    ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
+	     !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
+		DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+		enable_rc6 = false;
+	}
+
+	return enable_rc6;
+}
+
+int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
 	/* No RC6 before Ironlake and code is gone for ilk. */
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
+	if (!enable_rc6)
+		return 0;
+
+	if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+		DRM_INFO("RC6 disabled by BIOS\n");
+		return 0;
+	}
+
 	/* Respect the kernel parameter if it is set */
 	if (enable_rc6 >= 0) {
 		int mask;
@@ -4702,8 +4794,7 @@
 	/* 3a: Enable RC6 */
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
-			"on" : "off");
+	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
 	/* WaRsUseTimeoutMode */
 	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
 	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
@@ -4722,8 +4813,7 @@
 	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
 	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
 	 */
-	if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
+	if (NEEDS_WaRsDisableCoarsePowerGating(dev))
 		I915_WRITE(GEN9_PG_ENABLE, 0);
 	else
 		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@@ -5146,8 +5236,6 @@
 	u32 pcbr;
 	int pctx_size = 32*1024;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	pcbr = I915_READ(VLV_PCBR);
 	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
 		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
@@ -5169,7 +5257,7 @@
 	u32 pcbr;
 	int pctx_size = 24*1024;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	mutex_lock(&dev->struct_mutex);
 
 	pcbr = I915_READ(VLV_PCBR);
 	if (pcbr) {
@@ -5197,7 +5285,7 @@
 	pctx = i915_gem_object_create_stolen(dev, pctx_size);
 	if (!pctx) {
 		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
-		return;
+		goto out;
 	}
 
 	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
@@ -5206,6 +5294,7 @@
 out:
 	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 	dev_priv->vlv_pctx = pctx;
+	mutex_unlock(&dev->struct_mutex);
 }
 
 static void valleyview_cleanup_pctx(struct drm_device *dev)
@@ -5215,7 +5304,7 @@
 	if (WARN_ON(!dev_priv->vlv_pctx))
 		return;
 
-	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+	drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
 	dev_priv->vlv_pctx = NULL;
 }
 
@@ -6024,7 +6113,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 	/*
 	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
 	 * requirement.
@@ -6159,8 +6247,8 @@
 		return;
 
 	if (IS_IRONLAKE_M(dev)) {
-		mutex_lock(&dev->struct_mutex);
 		ironlake_enable_drps(dev);
+		mutex_lock(&dev->struct_mutex);
 		intel_init_emon(dev);
 		mutex_unlock(&dev->struct_mutex);
 	} else if (INTEL_INFO(dev)->gen >= 6) {
@@ -6990,6 +7078,7 @@
 		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
 			dev_priv->display.update_wm = ilk_update_wm;
 			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
+			dev_priv->display.program_watermarks = ilk_program_watermarks;
 		} else {
 			DRM_DEBUG_KMS("Failed to read display plane latency. "
 				      "Disable CxSR\n");
@@ -7155,9 +7244,10 @@
 {
 	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
-	div = vlv_gpu_freq_div(czclk_freq) / 2;
+	div = vlv_gpu_freq_div(czclk_freq);
 	if (div < 0)
 		return div;
+	div /= 2;
 
 	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
 }
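
This hunk and the next apply the same subtle fix: vlv_gpu_freq_div() can return
a negative errno, and the old code halved it before the sign check, so callers
saw a mangled error code. A sketch of the old, buggy ordering for illustration:

	div = vlv_gpu_freq_div(czclk_freq) / 2;	/* -EINVAL (-22) becomes -11 */
	if (div < 0)
		return div;	/* a bogus error code escapes to the caller */

Checking the sign first and only then halving preserves the exact errno.
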
@@ -7166,9 +7256,10 @@
 {
 	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
-	mul = vlv_gpu_freq_div(czclk_freq) / 2;
+	mul = vlv_gpu_freq_div(czclk_freq);
 	if (mul < 0)
 		return mul;
+	mul /= 2;
 
 	/* CHV needs even values */
 	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 9ccff30..0b42ada 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -225,7 +225,12 @@
 		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
 	}
 
-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
+	if (dev_priv->psr.link_standby)
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+	else
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+				   DP_PSR_ENABLE);
 }
 
 static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -280,6 +285,9 @@
 	if (IS_HASWELL(dev))
 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
+	if (dev_priv->psr.link_standby)
+		val |= EDP_PSR_LINK_STANDBY;
+
 	I915_WRITE(EDP_PSR_CTL, val |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -304,8 +312,15 @@
 
 	dev_priv->psr.source_ok = false;
 
-	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
-		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+	/*
+	 * HSW spec explicitly says PSR is tied to port A.
+ * BDW+ platforms with a DDI implementation of PSR have different
+ * PSR registers per transcoder and we only implement the transcoder EDP
+ * ones. Since by display design transcoder EDP is tied to port A,
+ * we can safely bail out based on port A.
+	 */
+	if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
 		return false;
 	}
 
@@ -314,6 +329,12 @@
 		return false;
 	}
 
+	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+	    !dev_priv->psr.link_standby) {
+		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
+		return false;
+	}
+
 	if (IS_HASWELL(dev) &&
 	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
 		      S3D_ENABLE) {
@@ -327,12 +348,6 @@
 		return false;
 	}
 
-	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
-	    ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
-		DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
-		return false;
-	}
-
 	dev_priv->psr.source_ok = true;
 	return true;
 }
@@ -763,6 +778,36 @@
 	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
 		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
+	/* Per platform default */
+	if (i915.enable_psr == -1) {
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+		    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+			i915.enable_psr = 1;
+		else
+			i915.enable_psr = 0;
+	}
+
+	/* Set link_standby vs. link_off defaults */
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		/* HSW and BDW require workarounds that we don't implement. */
+		dev_priv->psr.link_standby = false;
+	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+		/* On VLV and CHV only standby mode is supported. */
+		dev_priv->psr.link_standby = true;
+	else
+		/* For new platforms let's respect VBT back again */
+		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+
+	/* Override link_standby vs. link_off defaults */
+	if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
+		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
+		dev_priv->psr.link_standby = true;
+	}
+	if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
+		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
+		dev_priv->psr.link_standby = false;
+	}
+
 	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
 	mutex_init(&dev_priv->psr.lock);
 }
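
Taken together, the hunks above give i915.enable_psr a per-platform default and
two override values. As far as can be inferred from the code (a summary, not
documentation shipped with the patch):

	i915.enable_psr=0	PSR disabled
	i915.enable_psr=1	PSR enabled, platform-default link setting
	i915.enable_psr=2	PSR enabled, link standby forced on
	i915.enable_psr=3	PSR enabled, main link forced off (rejected on
				VLV/CHV by the condition check added earlier)
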
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 40c6aff..45ce45a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -746,9 +746,9 @@
 
 	ret = i915_gem_render_state_init(req);
 	if (ret)
-		DRM_ERROR("init render state: %d\n", ret);
+		return ret;
 
-	return ret;
+	return 0;
 }
 
 static int wa_add(struct drm_i915_private *dev_priv,
@@ -789,6 +789,22 @@
 
 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
+static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct i915_workarounds *wa = &dev_priv->workarounds;
+	const uint32_t index = wa->hw_whitelist_count[ring->id];
+
+	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+		return -EINVAL;
+
+	WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+		 i915_mmio_reg_offset(reg));
+	wa->hw_whitelist_count[ring->id]++;
+
+	return 0;
+}
+
 static int gen8_init_workarounds(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -894,6 +910,7 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
+	int ret;
 
 	/* WaEnableLbsSlaRetryTimerDecrement:skl */
 	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
@@ -964,6 +981,20 @@
 	/* WaDisableSTUnitPowerOptimization:skl,bxt */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
+	/* WaOCLCoherentLineFlush:skl,bxt */
+	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
+				    GEN8_LQSC_FLUSH_COHERENT_LINES));
+
+	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+	ret = wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+	if (ret)
+		return ret;
+
+	/* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+	ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
@@ -1019,6 +1050,16 @@
 	if (ret)
 		return ret;
 
+	/*
+	 * Actual WA is to disable per-context preemption granularity control
+	 * until D0, which is the default case, so this is equivalent to
+	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
+	 */
+	if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	}
+
 	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
 		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
 		I915_WRITE(FF_SLICE_CS_CHICKEN2,
@@ -1071,6 +1112,11 @@
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+	/* WaDisableLSQCROPERFforOCL:skl */
+	ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+	if (ret)
+		return ret;
+
 	return skl_tune_iz_hashing(ring);
 }
 
@@ -1106,6 +1152,20 @@
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 	}
 
+	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
+	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
+	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
+	/* WaDisableLSQCROPERFforOCL:bxt */
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+		ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+		if (ret)
+			return ret;
+
+		ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -1117,6 +1177,7 @@
 	WARN_ON(ring->id != RCS);
 
 	dev_priv->workarounds.count = 0;
+	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
 	if (IS_BROADWELL(dev))
 		return bdw_init_workarounds(ring);
@@ -1867,15 +1928,13 @@
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(req, 4);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
 	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
 					0 : MI_BATCH_NON_SECURE));
-	intel_ring_emit(ring, offset + len - 8);
-	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
 	return 0;
@@ -1901,6 +1960,17 @@
 	return 0;
 }
 
+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+	if (!dev_priv->status_page_dmah)
+		return;
+
+	drm_pci_free(ring->dev, dev_priv->status_page_dmah);
+	ring->status_page.page_addr = NULL;
+}
+
 static void cleanup_status_page(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_object *obj;
@@ -1917,9 +1987,9 @@
 
 static int init_status_page(struct intel_engine_cs *ring)
 {
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = ring->status_page.obj;
 
-	if ((obj = ring->status_page.obj) == NULL) {
+	if (obj == NULL) {
 		unsigned flags;
 		int ret;
 
@@ -1990,6 +2060,7 @@
 	else
 		iounmap(ringbuf->virtual_start);
 	ringbuf->virtual_start = NULL;
+	ringbuf->vma = NULL;
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
@@ -2048,6 +2119,9 @@
 			return ret;
 		}
 
+		/* Access through the GTT requires the device to be awake. */
+		assert_rpm_wakelock_held(dev_priv);
+
 		ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
 						    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
 		if (ringbuf->virtual_start == NULL) {
@@ -2056,6 +2130,8 @@
 		}
 	}
 
+	ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+
 	return 0;
 }
 
@@ -2164,7 +2240,7 @@
 		if (ret)
 			goto error;
 	} else {
-		BUG_ON(ring->id != RCS);
+		WARN_ON(ring->id != RCS);
 		ret = init_phys_status_page(ring);
 		if (ret)
 			goto error;
@@ -2210,7 +2286,12 @@
 	if (ring->cleanup)
 		ring->cleanup(ring);
 
-	cleanup_status_page(ring);
+	if (I915_NEED_GFX_HWS(ring->dev)) {
+		cleanup_status_page(ring);
+	} else {
+		WARN_ON(ring->id != RCS);
+		cleanup_phys_status_page(ring);
+	}
 
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);
@@ -2666,6 +2747,7 @@
 
 	ring->name = "render ring";
 	ring->id = RCS;
+	ring->exec_id = I915_EXEC_RENDER;
 	ring->mmio_base = RENDER_RING_BASE;
 
 	if (INTEL_INFO(dev)->gen >= 8) {
@@ -2814,6 +2896,7 @@
 
 	ring->name = "bsd ring";
 	ring->id = VCS;
+	ring->exec_id = I915_EXEC_BSD;
 
 	ring->write_tail = ring_write_tail;
 	if (INTEL_INFO(dev)->gen >= 6) {
@@ -2890,6 +2973,7 @@
 
 	ring->name = "bsd2 ring";
 	ring->id = VCS2;
+	ring->exec_id = I915_EXEC_BSD;
 
 	ring->write_tail = ring_write_tail;
 	ring->mmio_base = GEN8_BSD2_RING_BASE;
@@ -2920,6 +3004,7 @@
 
 	ring->name = "blitter ring";
 	ring->id = BCS;
+	ring->exec_id = I915_EXEC_BLT;
 
 	ring->mmio_base = BLT_RING_BASE;
 	ring->write_tail = ring_write_tail;
@@ -2977,6 +3062,7 @@
 
 	ring->name = "video enhancement ring";
 	ring->id = VECS;
+	ring->exec_id = I915_EXEC_VEBOX;
 
 	ring->mmio_base = VEBOX_RING_BASE;
 	ring->write_tail = ring_write_tail;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 49574ff..566b0ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -93,11 +93,13 @@
 	int score;
 	enum intel_ring_hangcheck_action action;
 	int deadlock;
+	u32 instdone[I915_NUM_INSTDONE_REG];
 };
 
 struct intel_ringbuffer {
 	struct drm_i915_gem_object *obj;
 	void __iomem *virtual_start;
+	struct i915_vma *vma;
 
 	struct intel_engine_cs *ring;
 	struct list_head link;
@@ -147,14 +149,16 @@
 struct  intel_engine_cs {
 	const char	*name;
 	enum intel_ring_id {
-		RCS = 0x0,
-		VCS,
+		RCS = 0,
 		BCS,
-		VECS,
-		VCS2
+		VCS,
+		VCS2,	/* Keep instances of the same type engine together. */
+		VECS
 	} id;
 #define I915_NUM_RINGS 5
-#define LAST_USER_RING (VECS + 1)
+#define _VCS(n) (VCS + (n))
+	unsigned int exec_id;
+	unsigned int guc_id;
 	u32		mmio_base;
 	struct		drm_device *dev;
 	struct intel_ringbuffer *buffer;
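
The "keep instances of the same type engine together" comment exists for the
new _VCS() macro: with the reordered enum, _VCS(n) resolves BSD instance n,
i.e. _VCS(0) == VCS and _VCS(1) == VCS2 (a reading of the macro, not code from
the patch).
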
@@ -268,6 +272,8 @@
 	struct list_head execlist_queue;
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
+	bool disable_lite_restore_wa;
+	u32 ctx_desc_template;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
 	int		(*emit_request)(struct drm_i915_gem_request *request);
 	int		(*emit_flush)(struct drm_i915_gem_request *request,
@@ -305,7 +311,6 @@
 
 	wait_queue_head_t irq_queue;
 
-	struct intel_context *default_context;
 	struct intel_context *last_context;
 
 	struct intel_ring_hangcheck hangcheck;
@@ -406,7 +411,7 @@
 	ring->status_page.page_addr[reg] = value;
 }
 
-/**
+/*
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
  * MI_STORE_DATA_IMM.
@@ -423,6 +428,7 @@
  * The area from dword 0x30 to 0x3ff is available for driver usage.
  */
 #define I915_GEM_HWS_INDEX		0x30
+#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 #define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 4f43d9b..6e54d97 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -284,6 +284,13 @@
 						1 << PIPE_C | 1 << PIPE_B);
 }
 
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
+{
+	if (IS_BROADWELL(dev_priv))
+		gen8_irq_power_well_pre_disable(dev_priv,
+						1 << PIPE_C | 1 << PIPE_B);
+}
+
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
@@ -309,6 +316,14 @@
 	}
 }
 
+static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
+				       struct i915_power_well *power_well)
+{
+	if (power_well->data == SKL_DISP_PW_2)
+		gen8_irq_power_well_pre_disable(dev_priv,
+						1 << PIPE_C | 1 << PIPE_B);
+}
+
 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 			       struct i915_power_well *power_well, bool enable)
 {
@@ -334,6 +349,7 @@
 
 	} else {
 		if (enable_requested) {
+			hsw_power_well_pre_disable(dev_priv);
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
 			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
@@ -456,15 +472,19 @@
 	  */
 }
 
-static void gen9_set_dc_state_debugmask_memory_up(
-			struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
 {
-	uint32_t val;
+	uint32_t val, mask;
+
+	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
+
+	if (IS_BROXTON(dev_priv))
+		mask |= DC_STATE_DEBUG_MASK_CORES;
 
 	/* The below bit doesn't need to be cleared ever afterwards */
 	val = I915_READ(DC_STATE_DEBUG);
-	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
-		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+	if ((val & mask) != mask) {
+		val |= mask;
 		I915_WRITE(DC_STATE_DEBUG, val);
 		POSTING_READ(DC_STATE_DEBUG);
 	}
@@ -525,9 +545,6 @@
 	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
 		state = DC_STATE_EN_UPTO_DC5;
 
-	if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
-		gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
 	val = I915_READ(DC_STATE_EN);
 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
 		      val & mask, state);
@@ -577,7 +594,8 @@
 	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
 					SKL_DISP_PW_2);
 
-	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+		  "Platform doesn't support DC5.\n");
 	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
 	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 
@@ -613,7 +631,8 @@
 {
 	struct drm_device *dev = dev_priv->dev;
 
-	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+		  "Platform doesn't support DC6.\n");
 	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
 	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
 		  "Backlight is not disabled.\n");
@@ -640,7 +659,8 @@
 {
 	assert_can_disable_dc5(dev_priv);
 
-	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+	    i915.enable_dc != 0 && i915.enable_dc != 1)
 		assert_can_disable_dc6(dev_priv);
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -668,7 +688,6 @@
 static void skl_set_power_well(struct drm_i915_private *dev_priv,
 			struct i915_power_well *power_well, bool enable)
 {
-	struct drm_device *dev = dev_priv->dev;
 	uint32_t tmp, fuse_status;
 	uint32_t req_mask, state_mask;
 	bool is_enabled, enable_requested, check_fuse_status = false;
@@ -706,23 +725,15 @@
 	state_mask = SKL_POWER_WELL_STATE(power_well->data);
 	is_enabled = tmp & state_mask;
 
+	if (!enable && enable_requested)
+		skl_power_well_pre_disable(dev_priv, power_well);
+
 	if (enable) {
 		if (!enable_requested) {
 			WARN((tmp & state_mask) &&
 				!I915_READ(HSW_PWR_WELL_BIOS),
 				"Invalid for power well status to be enabled, unless done by the BIOS, \
 				when request is to disable!\n");
-			if (power_well->data == SKL_DISP_PW_2) {
-				/*
-				 * DDI buffer programming unnecessary during
-				 * driver-load/resume as it's already done
-				 * during modeset initialization then. It's
-				 * also invalid here as encoder list is still
-				 * uninitialized.
-				 */
-				if (!dev_priv->power_domains.initializing)
-					intel_prepare_ddi(dev);
-			}
 			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
 		}
 
@@ -828,7 +839,8 @@
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+	    i915.enable_dc != 0 && i915.enable_dc != 1)
 		skl_enable_dc6(dev_priv);
 	else
 		gen9_enable_dc5(dev_priv);
@@ -840,7 +852,8 @@
 	if (power_well->count > 0) {
 		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 	} else {
-		if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
+		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+		    i915.enable_dc != 0 &&
 		    i915.enable_dc != 1)
 			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 		else
@@ -993,6 +1006,9 @@
 	valleyview_disable_display_irqs(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
+	/* make sure we're done processing display irqs */
+	synchronize_irq(dev_priv->dev->irq);
+
 	vlv_power_sequencer_reset(dev_priv);
 }
 
@@ -1941,7 +1957,7 @@
 {
 	struct i915_power_well *well;
 
-	if (!IS_SKYLAKE(dev_priv))
+	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 		return;
 
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
@@ -1955,7 +1971,7 @@
 {
 	struct i915_power_well *well;
 
-	if (!IS_SKYLAKE(dev_priv))
+	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 		return;
 
 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
@@ -2125,8 +2141,8 @@
 
 	skl_init_cdclk(dev_priv);
 
-	if (dev_priv->csr.dmc_payload)
-		intel_csr_load_program(dev_priv);
+	if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
+		gen9_set_dc_state_debugmask(dev_priv);
 }
 
 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2e1da06..4ecc076 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1527,6 +1527,7 @@
 		      struct drm_display_mode *mode)
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
@@ -1537,6 +1538,9 @@
 	if (intel_sdvo->pixel_clock_max < mode->clock)
 		return MODE_CLOCK_HIGH;
 
+	if (mode->clock > max_dotclk)
+		return MODE_CLOCK_HIGH;
+
 	if (intel_sdvo->is_lvds) {
 		if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
 			return MODE_PANEL;
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 2e2d4eb..db0ed49 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -24,8 +24,8 @@
  *	Eric Anholt <eric@anholt.net>
  */
 
-/**
- * @file SDVO command definitions and structures.
+/*
+ * SDVO command definitions and structures.
  */
 
 #define SDVO_OUTPUT_FIRST   (0)
@@ -66,39 +66,39 @@
 #define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
 #define DTD_FLAG_INTERLACE	(1 << 7)
 
-/** This matches the EDID DTD structure, more or less */
+/* This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
 	struct {
-		u16 clock;	/**< pixel clock, in 10kHz units */
-		u8 h_active;	/**< lower 8 bits (pixels) */
-		u8 h_blank;	/**< lower 8 bits (pixels) */
-		u8 h_high;	/**< upper 4 bits each h_active, h_blank */
-		u8 v_active;	/**< lower 8 bits (lines) */
-		u8 v_blank;	/**< lower 8 bits (lines) */
-		u8 v_high;	/**< upper 4 bits each v_active, v_blank */
+		u16 clock;	/* pixel clock, in 10kHz units */
+		u8 h_active;	/* lower 8 bits (pixels) */
+		u8 h_blank;	/* lower 8 bits (pixels) */
+		u8 h_high;	/* upper 4 bits each h_active, h_blank */
+		u8 v_active;	/* lower 8 bits (lines) */
+		u8 v_blank;	/* lower 8 bits (lines) */
+		u8 v_high;	/* upper 4 bits each v_active, v_blank */
 	} part1;
 
 	struct {
-		u8 h_sync_off;	/**< lower 8 bits, from hblank start */
-		u8 h_sync_width;	/**< lower 8 bits (pixels) */
-		/** lower 4 bits each vsync offset, vsync width */
+		u8 h_sync_off;	/* lower 8 bits, from hblank start */
+		u8 h_sync_width;	/* lower 8 bits (pixels) */
+		/* lower 4 bits each vsync offset, vsync width */
 		u8 v_sync_off_width;
-		/**
+		/*
 		* 2 high bits of hsync offset, 2 high bits of hsync width,
 		* bits 4-5 of vsync offset, and 2 high bits of vsync width.
 		*/
 		u8 sync_off_width_high;
 		u8 dtd_flags;
 		u8 sdvo_flags;
-		/** bits 6-7 of vsync offset at bits 6-7 */
+		/* bits 6-7 of vsync offset at bits 6-7 */
 		u8 v_sync_off_high;
 		u8 reserved;
 	} part2;
 } __packed;
 
 struct intel_sdvo_pixel_clock_range {
-	u16 min;	/**< pixel clock, in 10kHz units */
-	u16 max;	/**< pixel clock, in 10kHz units */
+	u16 min;	/* pixel clock, in 10kHz units */
+	u16 max;	/* pixel clock, in 10kHz units */
 } __packed;
 
 struct intel_sdvo_preferred_input_timing_args {
@@ -144,7 +144,7 @@
 
 #define SDVO_CMD_RESET					0x01
 
-/** Returns a struct intel_sdvo_caps */
+/* Returns a struct intel_sdvo_caps */
 #define SDVO_CMD_GET_DEVICE_CAPS			0x02
 
 #define SDVO_CMD_GET_FIRMWARE_REV			0x86
@@ -152,7 +152,7 @@
 # define SDVO_DEVICE_FIRMWARE_MAJOR			SDVO_I2C_RETURN_1
 # define SDVO_DEVICE_FIRMWARE_PATCH			SDVO_I2C_RETURN_2
 
-/**
+/*
  * Reports which inputs are trained (managed to sync).
  *
  * Devices must have trained within 2 vsyncs of a mode change.
@@ -164,10 +164,10 @@
 	unsigned int pad:6;
 } __packed;
 
-/** Returns a struct intel_sdvo_output_flags of active outputs. */
+/* Returns a struct intel_sdvo_output_flags of active outputs. */
 #define SDVO_CMD_GET_ACTIVE_OUTPUTS			0x04
 
-/**
+/*
  * Sets the current set of active outputs.
  *
  * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
@@ -175,7 +175,7 @@
  */
 #define SDVO_CMD_SET_ACTIVE_OUTPUTS			0x05
 
-/**
+/*
  * Returns the current mapping of SDVO inputs to outputs on the device.
  *
  * Returns two struct intel_sdvo_output_flags structures.
@@ -185,29 +185,29 @@
 	u16 in0, in1;
 };
 
-/**
+/*
  * Sets the current mapping of SDVO inputs to outputs on the device.
  *
  * Takes two struct intel_sdvo_output_flags structures.
  */
 #define SDVO_CMD_SET_IN_OUT_MAP				0x07
 
-/**
+/*
  * Returns a struct intel_sdvo_output_flags of attached displays.
  */
 #define SDVO_CMD_GET_ATTACHED_DISPLAYS			0x0b
 
-/**
+/*
  * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
  */
 #define SDVO_CMD_GET_HOT_PLUG_SUPPORT			0x0c
 
-/**
+/*
  * Takes a struct intel_sdvo_output_flags.
  */
 #define SDVO_CMD_SET_ACTIVE_HOT_PLUG			0x0d
 
-/**
+/*
  * Returns a struct intel_sdvo_output_flags of displays with hot plug
  * interrupts enabled.
  */
@@ -221,7 +221,7 @@
 	unsigned int pad:6;
 } __packed;
 
-/**
+/*
  * Selects which input is affected by future input commands.
  *
  * Commands affected include SET_INPUT_TIMINGS_PART[12],
@@ -234,7 +234,7 @@
 	unsigned int pad:7;
 } __packed;
 
-/**
+/*
  * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
  * future output commands.
  *
@@ -280,7 +280,7 @@
 # define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH			(2 << 4)
 # define SDVO_DTD_VSYNC_OFF_HIGH			SDVO_I2C_ARG_6
 
-/**
+/*
  * Generates a DTD based on the given width, height, and flags.
  *
  * This will be supported by any device supporting scaling or interlaced
@@ -300,24 +300,24 @@
 #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1	0x1b
 #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2	0x1c
 
-/** Returns a struct intel_sdvo_pixel_clock_range */
+/* Returns a struct intel_sdvo_pixel_clock_range */
 #define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE		0x1d
-/** Returns a struct intel_sdvo_pixel_clock_range */
+/* Returns a struct intel_sdvo_pixel_clock_range */
 #define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE		0x1e
 
-/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
 #define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS		0x1f
 
-/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
 #define SDVO_CMD_GET_CLOCK_RATE_MULT			0x20
-/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
 #define SDVO_CMD_SET_CLOCK_RATE_MULT			0x21
 # define SDVO_CLOCK_RATE_MULT_1X				(1 << 0)
 # define SDVO_CLOCK_RATE_MULT_2X				(1 << 1)
 # define SDVO_CLOCK_RATE_MULT_4X				(1 << 3)
 
 #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
-/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+/* 6 bytes of bit flags for TV formats shared by all TV format functions */
 struct intel_sdvo_tv_format {
 	unsigned int ntsc_m:1;
 	unsigned int ntsc_j:1;
@@ -376,7 +376,7 @@
 
 #define SDVO_CMD_SET_TV_FORMAT				0x29
 
-/** Returns the resolutiosn that can be used with the given TV format */
+/* Returns the resolutions that can be used with the given TV format */
 #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT		0x83
 struct intel_sdvo_sdtv_resolution_request {
 	unsigned int ntsc_m:1;
@@ -539,7 +539,7 @@
 #define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING		0x2d
 #define SDVO_CMD_GET_PANEL_POWER_SEQUENCING		0x2e
 #define SDVO_CMD_SET_PANEL_POWER_SEQUENCING		0x2f
-/**
+/*
  * The panel power sequencing parameters are in units of milliseconds.
  * The high fields are bits 8:9 of the 10-bit values.
  */
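The DTD packs each 12-bit timing value as a full low byte plus a shared high-nibble byte. A decode sketch, assuming h_active's bits 8-11 sit in the high nibble of h_high (matching the encode in intel_sdvo.c):

	static u16 sketch_dtd_h_active(const struct intel_sdvo_dtd *dtd)
	{
		/* low 8 bits in h_active; the low nibble of h_high
		 * holds h_blank bits 8-11 instead */
		return dtd->part1.h_active |
		       ((u16)(dtd->part1.h_high & 0xf0) << 4);
	}
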
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 8831fc5..c399818 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -129,17 +129,18 @@
 	return val;
 }
 
-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
 {
 	u32 val = 0;
-	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
 			SB_CRRDDA_NP, reg, &val);
 	return val;
 }
 
-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
+		       u8 port, u32 reg, u32 val)
 {
-	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
 			SB_CRWRDA_NP, reg, &val);
 }
 
@@ -171,20 +172,6 @@
 			SB_CRWRDA_NP, reg, &val);
 }
 
-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
-{
-	u32 val = 0;
-	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
-			SB_CRRDDA_NP, reg, &val);
-	return val;
-}
-
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
-{
-	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
-			SB_CRWRDA_NP, reg, &val);
-}
-
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 {
 	u32 val = 0;
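vlv_iosf_sb_read/write take the IOSF sideband port as a parameter instead of hard-coding it per unit, which is what lets the dedicated GPS_CORE (and GPIO_NC) helpers go away. A caller sketch (the register offset is hypothetical):

	/* was: vlv_gpio_nc_read(dev_priv, reg) */
	u32 val = vlv_iosf_sb_read(dev_priv, IOSF_PORT_GPIO_NC, reg);

	vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, reg, val | 1);
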
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4ff7a1f..a2582c4 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -178,28 +178,33 @@
 }
 
 static void
-skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
-		 struct drm_framebuffer *fb,
-		 int crtc_x, int crtc_y,
-		 unsigned int crtc_w, unsigned int crtc_h,
-		 uint32_t x, uint32_t y,
-		 uint32_t src_w, uint32_t src_h)
+skl_update_plane(struct drm_plane *drm_plane,
+		 const struct intel_crtc_state *crtc_state,
+		 const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = drm_plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
 	u32 plane_ctl, stride_div, stride;
-	const struct drm_intel_sprite_colorkey *key =
-		&to_intel_plane_state(drm_plane->state)->ckey;
+	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
 	u32 surf_addr;
 	u32 tile_height, plane_offset, plane_size;
 	unsigned int rotation;
 	int x_offset, y_offset;
-	struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
-	int scaler_id;
+	int crtc_x = plane_state->dst.x1;
+	int crtc_y = plane_state->dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+	uint32_t x = plane_state->src.x1 >> 16;
+	uint32_t y = plane_state->src.y1 >> 16;
+	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+	const struct intel_scaler *scaler =
+		&crtc_state->scaler_state.scalers[plane_state->scaler_id];
 
 	plane_ctl = PLANE_CTL_ENABLE |
 		PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -208,14 +213,12 @@
 	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
 
-	rotation = drm_plane->state->rotation;
+	rotation = plane_state->base.rotation;
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
 					       fb->pixel_format);
 
-	scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
-
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -236,9 +239,10 @@
 	surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
 
 	if (intel_rotation_90_or_270(rotation)) {
+		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
 		/* stride: Surface height in tiles */
-		tile_height = intel_tile_height(dev, fb->pixel_format,
-						fb->modifier[0], 0);
+		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
 		stride = DIV_ROUND_UP(fb->height, tile_height);
 		plane_size = (src_w << 16) | src_h;
 		x_offset = stride * tile_height - y - (src_h + 1);
@@ -256,13 +260,13 @@
 	I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
 
 	/* program plane scaler */
-	if (scaler_id >= 0) {
+	if (plane_state->scaler_id >= 0) {
 		uint32_t ps_ctrl = 0;
+		int scaler_id = plane_state->scaler_id;
 
 		DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
 			PS_PLANE_SEL(plane));
-		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
-			crtc_state->scaler_state.scalers[scaler_id].mode;
+		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
 		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
 		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
 		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
@@ -334,24 +338,29 @@
 }
 
 static void
-vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
-		 struct drm_framebuffer *fb,
-		 int crtc_x, int crtc_y,
-		 unsigned int crtc_w, unsigned int crtc_h,
-		 uint32_t x, uint32_t y,
-		 uint32_t src_w, uint32_t src_h)
+vlv_update_plane(struct drm_plane *dplane,
+		 const struct intel_crtc_state *crtc_state,
+		 const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int pipe = intel_plane->pipe;
 	int plane = intel_plane->plane;
 	u32 sprctl;
-	unsigned long sprsurf_offset, linear_offset;
-	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key =
-		&to_intel_plane_state(dplane->state)->ckey;
+	u32 sprsurf_offset, linear_offset;
+	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+	int crtc_x = plane_state->dst.x1;
+	int crtc_y = plane_state->dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+	uint32_t x = plane_state->src.x1 >> 16;
+	uint32_t y = plane_state->src.y1 >> 16;
+	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
 	sprctl = SP_ENABLE;
 
@@ -413,20 +422,18 @@
 	crtc_w--;
 	crtc_h--;
 
-	linear_offset = y * fb->pitches[0] + x * pixel_size;
-	sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
-							&x, &y,
-							obj->tiling_mode,
-							pixel_size,
-							fb->pitches[0]);
+	linear_offset = y * fb->pitches[0] + x * cpp;
+	sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+						   fb->modifier[0], cpp,
+						   fb->pitches[0]);
 	linear_offset -= sprsurf_offset;
 
-	if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
+	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
 		sprctl |= SP_ROTATE_180;
 
 		x += src_w;
 		y += src_h;
-		linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+		linear_offset += src_h * fb->pitches[0] + src_w * cpp;
 	}
 
 	if (key->flags) {
@@ -474,23 +481,28 @@
 }
 
 static void
-ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-		 struct drm_framebuffer *fb,
-		 int crtc_x, int crtc_y,
-		 unsigned int crtc_w, unsigned int crtc_h,
-		 uint32_t x, uint32_t y,
-		 uint32_t src_w, uint32_t src_h)
+ivb_update_plane(struct drm_plane *plane,
+		 const struct intel_crtc_state *crtc_state,
+		 const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	enum pipe pipe = intel_plane->pipe;
 	u32 sprctl, sprscale = 0;
-	unsigned long sprsurf_offset, linear_offset;
-	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key =
-		&to_intel_plane_state(plane->state)->ckey;
+	u32 sprsurf_offset, linear_offset;
+	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+	int crtc_x = plane_state->dst.x1;
+	int crtc_y = plane_state->dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+	uint32_t x = plane_state->src.x1 >> 16;
+	uint32_t y = plane_state->src.y1 >> 16;
+	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
 	sprctl = SPRITE_ENABLE;
 
@@ -543,22 +555,20 @@
 	if (crtc_w != src_w || crtc_h != src_h)
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
-	linear_offset = y * fb->pitches[0] + x * pixel_size;
-	sprsurf_offset =
-		intel_gen4_compute_page_offset(dev_priv,
-					       &x, &y, obj->tiling_mode,
-					       pixel_size, fb->pitches[0]);
+	linear_offset = y * fb->pitches[0] + x * cpp;
+	sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+						   fb->modifier[0], cpp,
+						   fb->pitches[0]);
 	linear_offset -= sprsurf_offset;
 
-	if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
 		sprctl |= SPRITE_ROTATE_180;
 
 		/* HSW and BDW do this automagically in hardware */
 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
 			x += src_w;
 			y += src_h;
-			linear_offset += src_h * fb->pitches[0] +
-				src_w * pixel_size;
+			linear_offset += src_h * fb->pitches[0] + src_w * cpp;
 		}
 	}
 
@@ -612,23 +622,28 @@
 }
 
 static void
-ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-		 struct drm_framebuffer *fb,
-		 int crtc_x, int crtc_y,
-		 unsigned int crtc_w, unsigned int crtc_h,
-		 uint32_t x, uint32_t y,
-		 uint32_t src_w, uint32_t src_h)
+ilk_update_plane(struct drm_plane *plane,
+		 const struct intel_crtc_state *crtc_state,
+		 const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	int pipe = intel_plane->pipe;
-	unsigned long dvssurf_offset, linear_offset;
 	u32 dvscntr, dvsscale;
-	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key =
-		&to_intel_plane_state(plane->state)->ckey;
+	u32 dvssurf_offset, linear_offset;
+	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+	int crtc_x = plane_state->dst.x1;
+	int crtc_y = plane_state->dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+	uint32_t x = plane_state->src.x1 >> 16;
+	uint32_t y = plane_state->src.y1 >> 16;
+	uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+	uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
 	dvscntr = DVS_ENABLE;
 
@@ -677,19 +692,18 @@
 	if (crtc_w != src_w || crtc_h != src_h)
 		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
-	linear_offset = y * fb->pitches[0] + x * pixel_size;
-	dvssurf_offset =
-		intel_gen4_compute_page_offset(dev_priv,
-					       &x, &y, obj->tiling_mode,
-					       pixel_size, fb->pitches[0]);
+	linear_offset = y * fb->pitches[0] + x * cpp;
+	dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+						   fb->modifier[0], cpp,
+						   fb->pitches[0]);
 	linear_offset -= dvssurf_offset;
 
-	if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
 		dvscntr |= DVS_ROTATE_180;
 
 		x += src_w;
 		y += src_h;
-		linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+		linear_offset += src_h * fb->pitches[0] + src_w * cpp;
 	}
 
 	if (key->flags) {
@@ -754,7 +768,6 @@
 	int hscale, vscale;
 	int max_scale, min_scale;
 	bool can_scale;
-	int pixel_size;
 
 	if (!fb) {
 		state->visible = false;
@@ -876,6 +889,7 @@
 	/* Check size restrictions when scaling */
 	if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
 		unsigned int width_bytes;
+		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
 		WARN_ON(!can_scale);
 
@@ -887,9 +901,7 @@
 		if (src_w < 3 || src_h < 3)
 			state->visible = false;
 
-		pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-		width_bytes = ((src_x * pixel_size) & 63) +
-					src_w * pixel_size;
+		width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
 
 		if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
 		    width_bytes > 4096 || fb->pitches[0] > 4096)) {
@@ -913,30 +925,6 @@
 	return 0;
 }
 
-static void
-intel_commit_sprite_plane(struct drm_plane *plane,
-			  struct intel_plane_state *state)
-{
-	struct drm_crtc *crtc = state->base.crtc;
-	struct intel_plane *intel_plane = to_intel_plane(plane);
-	struct drm_framebuffer *fb = state->base.fb;
-
-	crtc = crtc ? crtc : plane->crtc;
-
-	if (state->visible) {
-		intel_plane->update_plane(plane, crtc, fb,
-					  state->dst.x1, state->dst.y1,
-					  drm_rect_width(&state->dst),
-					  drm_rect_height(&state->dst),
-					  state->src.x1 >> 16,
-					  state->src.y1 >> 16,
-					  drm_rect_width(&state->src) >> 16,
-					  drm_rect_height(&state->src) >> 16);
-	} else {
-		intel_plane->disable_plane(plane, crtc);
-	}
-}
-
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
@@ -1118,7 +1106,6 @@
 	intel_plane->plane = plane;
 	intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
 	intel_plane->check_plane = intel_check_sprite_plane;
-	intel_plane->commit_plane = intel_commit_sprite_plane;
 	possible_crtcs = (1 << pipe);
 	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
 				       &intel_plane_funcs,
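All three pre-SKL update_plane hooks now share the same offset computation, keyed off the fb modifier and bytes per pixel instead of the GEM object's tiling mode. The common pattern, lifted from the hunks above:

	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	u32 linear_offset = y * fb->pitches[0] + x * cpp;

	/* returns the surface offset of the tile containing (x, y)
	 * and rewrites x/y to be relative to that tile */
	u32 surf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
						    fb->modifier[0], cpp,
						    fb->pitches[0]);
	linear_offset -= surf_offset;

Note also that plane_state->src is a 16.16 fixed-point rect while dst is in integer pixels, hence the >> 16 when deriving the source coordinates.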
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 948cbff..6745bad 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -897,6 +897,10 @@
 {
 	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+	if (mode->clock > max_dotclk)
+		return MODE_CLOCK_HIGH;
 
 	/* Ensure TV refresh is close to desired refresh */
 	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -1178,10 +1182,9 @@
 intel_tv_detect_type(struct intel_tv *intel_tv,
 		      struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = &intel_tv->base.base;
-	struct drm_crtc *crtc = encoder->crtc;
+	struct drm_crtc *crtc = connector->state->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
@@ -1230,8 +1233,7 @@
 	I915_WRITE(TV_DAC, tv_dac);
 	POSTING_READ(TV_DAC);
 
-	intel_wait_for_vblank(intel_tv->base.base.dev,
-			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	type = -1;
 	tv_dac = I915_READ(TV_DAC);
@@ -1261,8 +1263,7 @@
 	POSTING_READ(TV_CTL);
 
 	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
-	intel_wait_for_vblank(intel_tv->base.base.dev,
-			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	/* Restore interrupt config */
 	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1420,6 +1421,7 @@
 		if (!mode_ptr)
 			continue;
 		strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+		mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
 
 		mode_ptr->hdisplay = hactive_s;
 		mode_ptr->hsync_start = hactive_s + 1;
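The added terminator matters because strncpy() does not NUL-terminate when the source fills the buffer; the general pattern:

	char name[DRM_DISPLAY_MODE_LEN];

	strncpy(name, input->name, DRM_DISPLAY_MODE_LEN);
	/* strncpy() leaves the buffer unterminated if input->name
	 * is DRM_DISPLAY_MODE_LEN bytes or longer */
	name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
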
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 277e60a..436d8f2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -327,13 +327,54 @@
 	}
 }
 
+static bool
+fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+	u32 dbg;
+
+	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
+		return false;
+
+	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+	return true;
+}
+
+static bool
+vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+	u32 cer;
+
+	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
+		return false;
+
+	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+
+	return true;
+}
+
+static bool
+check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
+		return fpga_check_for_unclaimed_mmio(dev_priv);
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		return vlv_check_for_unclaimed_mmio(dev_priv);
+
+	return false;
+}
+
 static void __intel_uncore_early_sanitize(struct drm_device *dev,
 					  bool restore_forcewake)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (HAS_FPGA_DBG_UNCLAIMED(dev))
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	/* clear out unclaimed reg detection bit */
+	if (check_for_unclaimed_mmio(dev_priv))
+		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	/* clear out old GT FIFO errors */
 	if (IS_GEN6(dev) || IS_GEN7(dev))
@@ -359,6 +400,8 @@
 
 void intel_uncore_sanitize(struct drm_device *dev)
 {
+	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
 	intel_disable_gt_powersave(dev);
 }
@@ -585,38 +628,38 @@
 }
 
 static void
-hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
-			i915_reg_t reg, bool read, bool before)
+__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+		      const i915_reg_t reg,
+		      const bool read,
+		      const bool before)
 {
-	const char *op = read ? "reading" : "writing to";
-	const char *when = before ? "before" : "after";
-
-	if (!i915.mmio_debug)
+	/* XXX: We limit the automatically armed mmio debug
+	 * traces on these platforms. They reveal far too many
+	 * unclaimed accesses and CI/BAT suffers from the noise.
+	 * Please fix the offenders and then re-enable the
+	 * automatic traces.
+	 */
+	if (i915.mmio_debug < 2 &&
+	    (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
 		return;
 
-	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
-		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
-		     when, op, i915_mmio_reg_offset(reg));
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	if (WARN(check_for_unclaimed_mmio(dev_priv),
+		 "Unclaimed register detected %s %s register 0x%x\n",
+		 before ? "before" : "after",
+		 read ? "reading" : "writing to",
+		 i915_mmio_reg_offset(reg)))
 		i915.mmio_debug--; /* Only report the first N failures */
-	}
 }
 
-static void
-hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
+static inline void
+unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+		    const i915_reg_t reg,
+		    const bool read,
+		    const bool before)
 {
-	static bool mmio_debug_once = true;
-
-	if (i915.mmio_debug || !mmio_debug_once)
+	if (likely(!i915.mmio_debug))
 		return;
 
-	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
-		DRM_DEBUG("Unclaimed register detected, "
-			  "enabling oneshot unclaimed register reporting. "
-			  "Please use i915.mmio_debug=N for more information.\n");
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-		i915.mmio_debug = mmio_debug_once--;
-	}
+	__unclaimed_reg_debug(dev_priv, reg, read, before);
 }
 
 #define GEN2_READ_HEADER(x) \
@@ -664,9 +707,11 @@
 	unsigned long irqflags; \
 	u##x val = 0; \
 	assert_rpm_wakelock_held(dev_priv); \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+	unclaimed_reg_debug(dev_priv, reg, true, true)
 
 #define GEN6_READ_FOOTER \
+	unclaimed_reg_debug(dev_priv, reg, true, false); \
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
 	return val
@@ -699,11 +744,9 @@
 static u##x \
 gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	GEN6_READ_HEADER(x); \
-	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
 	if (NEEDS_FORCE_WAKE(offset)) \
 		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
 	val = __raw_i915_read##x(dev_priv, reg); \
-	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
 	GEN6_READ_FOOTER; \
 }
 
@@ -751,7 +794,6 @@
 gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_READ_HEADER(x); \
-	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
 	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
 		fw_engine = 0; \
 	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
@@ -765,7 +807,6 @@
 	if (fw_engine) \
 		__force_wake_get(dev_priv, fw_engine); \
 	val = __raw_i915_read##x(dev_priv, reg); \
-	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
 	GEN6_READ_FOOTER; \
 }
 
@@ -864,9 +905,11 @@
 	unsigned long irqflags; \
 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
 	assert_rpm_wakelock_held(dev_priv); \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+	unclaimed_reg_debug(dev_priv, reg, false, true)
 
 #define GEN6_WRITE_FOOTER \
+	unclaimed_reg_debug(dev_priv, reg, false, false); \
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
 
 #define __gen6_write(x) \
@@ -892,13 +935,10 @@
 	if (NEEDS_FORCE_WAKE(offset)) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
-	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
-	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
-	hsw_unclaimed_reg_detect(dev_priv); \
 	GEN6_WRITE_FOOTER; \
 }
 
@@ -928,12 +968,9 @@
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
 	GEN6_WRITE_HEADER; \
-	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
 	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
 		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
 	__raw_i915_write##x(dev_priv, reg, val); \
-	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
-	hsw_unclaimed_reg_detect(dev_priv); \
 	GEN6_WRITE_FOOTER; \
 }
 
@@ -987,7 +1024,6 @@
 		bool trace) { \
 	enum forcewake_domains fw_engine; \
 	GEN6_WRITE_HEADER; \
-	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
 	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
 	    is_gen9_shadowed(dev_priv, reg)) \
 		fw_engine = 0; \
@@ -1002,8 +1038,6 @@
 	if (fw_engine) \
 		__force_wake_get(dev_priv, fw_engine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
-	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
-	hsw_unclaimed_reg_detect(dev_priv); \
 	GEN6_WRITE_FOOTER; \
 }
 
@@ -1223,6 +1257,8 @@
 	intel_uncore_fw_domains_init(dev);
 	__intel_uncore_early_sanitize(dev, false);
 
+	dev_priv->uncore.unclaimed_mmio_check = 1;
+
 	switch (INTEL_INFO(dev)->gen) {
 	default:
 	case 9:
@@ -1580,13 +1616,26 @@
 	return intel_get_gpu_reset(dev) != NULL;
 }
 
-void intel_uncore_check_errors(struct drm_device *dev)
+bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	return check_for_unclaimed_mmio(dev_priv);
+}
 
-	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
-	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-		DRM_ERROR("Unclaimed register before interrupt\n");
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+bool
+intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
+{
+	if (unlikely(i915.mmio_debug ||
+		     dev_priv->uncore.unclaimed_mmio_check <= 0))
+		return false;
+
+	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
+		DRM_DEBUG("Unclaimed register detected, "
+			  "enabling oneshot unclaimed register reporting. "
+			  "Please use i915.mmio_debug=N for more information.\n");
+		i915.mmio_debug++;
+		dev_priv->uncore.unclaimed_mmio_check--;
+		return true;
 	}
+
+	return false;
 }
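Every GEN6+ mmio accessor is now bracketed by unclaimed_reg_debug() through the shared header/footer macros, so a claim failure can be attributed to the access before or after it. Roughly, for a 32-bit read (a sketch, not the literal macro expansion):

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	unclaimed_reg_debug(dev_priv, reg, true, true);   /* before */
	val = __raw_i915_read32(dev_priv, reg);
	unclaimed_reg_debug(dev_priv, reg, true, false);  /* after */
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

unclaimed_reg_debug() is a no-op unless i915.mmio_debug is set, which keeps the fast path cheap.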
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 063825f..2a95d10 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -109,13 +109,6 @@
 {
 }
 
-static bool dw_hdmi_imx_encoder_mode_fixup(struct drm_encoder *encoder,
-					   const struct drm_display_mode *mode,
-					   struct drm_display_mode *adj_mode)
-{
-	return true;
-}
-
 static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
 					 struct drm_display_mode *mode,
 					 struct drm_display_mode *adj_mode)
@@ -125,7 +118,7 @@
 static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
 {
 	struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
-	int mux = imx_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
+	int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
 
 	regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
 			   IMX6Q_GPR3_HDMI_MUX_CTL_MASK,
@@ -138,7 +131,6 @@
 }
 
 static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
-	.mode_fixup = dw_hdmi_imx_encoder_mode_fixup,
 	.mode_set   = dw_hdmi_imx_encoder_mode_set,
 	.prepare    = dw_hdmi_imx_encoder_prepare,
 	.commit     = dw_hdmi_imx_encoder_commit,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 2f57d79..9876e0f 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -17,7 +17,6 @@
 #include <linux/device.h>
 #include <linux/fb.h>
 #include <linux/module.h>
-#include <linux/of_graph.h>
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
 #include <drm/drm_fb_helper.h>
@@ -171,18 +170,6 @@
 	imx_drm_crtc->imx_drm_helper_funcs.disable_vblank(imx_drm_crtc->crtc);
 }
 
-static void imx_drm_driver_preclose(struct drm_device *drm,
-		struct drm_file *file)
-{
-	int i;
-
-	if (!file->is_master)
-		return;
-
-	for (i = 0; i < MAX_CRTC; i++)
-		imx_drm_disable_vblank(drm, i);
-}
-
 static const struct file_operations imx_drm_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
@@ -424,36 +411,6 @@
 }
 EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
 
-/*
- * @node: device tree node containing encoder input ports
- * @encoder: drm_encoder
- */
-int imx_drm_encoder_get_mux_id(struct device_node *node,
-			       struct drm_encoder *encoder)
-{
-	struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc);
-	struct device_node *ep;
-	struct of_endpoint endpoint;
-	struct device_node *port;
-	int ret;
-
-	if (!node || !imx_crtc)
-		return -EINVAL;
-
-	for_each_endpoint_of_node(node, ep) {
-		port = of_graph_get_remote_port(ep);
-		of_node_put(port);
-		if (port == imx_crtc->crtc->port) {
-			ret = of_graph_parse_endpoint(ep, &endpoint);
-			of_node_put(ep);
-			return ret ? ret : endpoint.port;
-		}
-	}
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
-
 static const struct drm_ioctl_desc imx_drm_ioctls[] = {
 	/* none so far */
 };
@@ -463,7 +420,6 @@
 	.load			= imx_drm_driver_load,
 	.unload			= imx_drm_driver_unload,
 	.lastclose		= imx_drm_driver_lastclose,
-	.preclose		= imx_drm_driver_preclose,
 	.set_busid		= drm_platform_set_busid,
 	.gem_free_object	= drm_gem_cma_free_object,
 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
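The open-coded mux lookup moves into the shared DRM OF helpers; callers resolve the encoder's active input port id directly, as the dw_hdmi-imx and imx-ldb hunks show:

	#include <drm/drm_of.h>

	/* id of the port on 'node' whose endpoint points at the
	 * encoder's active CRTC, or -EINVAL if none matches */
	int mux = drm_of_encoder_active_port_id(node, encoder);
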
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 71cf6d9..b0241b9 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -46,8 +46,6 @@
 int imx_drm_set_bus_format(struct drm_encoder *encoder,
 		u32 bus_format);
 
-int imx_drm_encoder_get_mux_id(struct device_node *node,
-		struct drm_encoder *encoder);
 int imx_drm_encoder_parse_of(struct drm_device *drm,
 	struct drm_encoder *encoder, struct device_node *np);
 
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 22ac482..a58eee5 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -19,6 +19,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -139,13 +140,6 @@
 {
 }
 
-static bool imx_ldb_encoder_mode_fixup(struct drm_encoder *encoder,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
 		unsigned long serial_clk, unsigned long di_clk)
 {
@@ -215,7 +209,7 @@
 	struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
 	struct imx_ldb *ldb = imx_ldb_ch->ldb;
 	int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
-	int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
+	int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
 
 	drm_panel_prepare(imx_ldb_ch->panel);
 
@@ -265,7 +259,7 @@
 	int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
 	unsigned long serial_clk;
 	unsigned long di_clk = mode->clock * 1000;
-	int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
+	int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
 
 	if (mode->clock > 170000) {
 		dev_warn(ldb->dev,
@@ -376,7 +370,6 @@
 
 static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
 	.dpms = imx_ldb_encoder_dpms,
-	.mode_fixup = imx_ldb_encoder_mode_fixup,
 	.prepare = imx_ldb_encoder_prepare,
 	.commit = imx_ldb_encoder_commit,
 	.mode_set = imx_ldb_encoder_mode_set,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 292349f..ae7a9fb 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -286,13 +286,6 @@
 		dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret);
 }
 
-static bool imx_tve_encoder_mode_fixup(struct drm_encoder *encoder,
-				       const struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
 {
 	struct imx_tve *tve = enc_to_tve(encoder);
@@ -379,7 +372,6 @@
 
 static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
 	.dpms = imx_tve_encoder_dpms,
-	.mode_fixup = imx_tve_encoder_mode_fixup,
 	.prepare = imx_tve_encoder_prepare,
 	.mode_set = imx_tve_encoder_mode_set,
 	.commit = imx_tve_encoder_commit,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 2872263..dee8e8b 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -22,6 +22,8 @@
 #include <linux/fb.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
+#include <linux/reservation.h>
+#include <linux/dma-buf.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 
@@ -31,6 +33,23 @@
 
 #define DRIVER_DESC		"i.MX IPUv3 Graphics"
 
+enum ipu_flip_status {
+	IPU_FLIP_NONE,
+	IPU_FLIP_PENDING,
+	IPU_FLIP_SUBMITTED,
+};
+
+struct ipu_flip_work {
+	struct work_struct		unref_work;
+	struct drm_gem_object		*bo;
+	struct drm_pending_vblank_event *page_flip_event;
+	struct work_struct		fence_work;
+	struct ipu_crtc			*crtc;
+	struct fence			*excl;
+	unsigned			shared_count;
+	struct fence			**shared;
+};
+
 struct ipu_crtc {
 	struct device		*dev;
 	struct drm_crtc		base;
@@ -42,8 +61,9 @@
 	struct ipu_dc		*dc;
 	struct ipu_di		*di;
 	int			enabled;
-	struct drm_pending_vblank_event *page_flip_event;
-	struct drm_framebuffer	*newfb;
+	enum ipu_flip_status	flip_state;
+	struct workqueue_struct *flip_queue;
+	struct ipu_flip_work	*flip_work;
 	int			irq;
 	u32			bus_format;
 	int			di_hsync_pin;
@@ -104,15 +124,45 @@
 	}
 }
 
+static void ipu_flip_unref_work_func(struct work_struct *__work)
+{
+	struct ipu_flip_work *work =
+			container_of(__work, struct ipu_flip_work, unref_work);
+
+	drm_gem_object_unreference_unlocked(work->bo);
+	kfree(work);
+}
+
+static void ipu_flip_fence_work_func(struct work_struct *__work)
+{
+	struct ipu_flip_work *work =
+			container_of(__work, struct ipu_flip_work, fence_work);
+	int i;
+
+	/* wait for all fences attached to the FB obj to signal */
+	if (work->excl) {
+		fence_wait(work->excl, false);
+		fence_put(work->excl);
+	}
+	for (i = 0; i < work->shared_count; i++) {
+		fence_wait(work->shared[i], false);
+		fence_put(work->shared[i]);
+	}
+
+	work->crtc->flip_state = IPU_FLIP_SUBMITTED;
+}
+
 static int ipu_page_flip(struct drm_crtc *crtc,
 		struct drm_framebuffer *fb,
 		struct drm_pending_vblank_event *event,
 		uint32_t page_flip_flags)
 {
+	struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
 	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+	struct ipu_flip_work *flip_work;
 	int ret;
 
-	if (ipu_crtc->newfb)
+	if (ipu_crtc->flip_state != IPU_FLIP_NONE)
 		return -EBUSY;
 
 	ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
@@ -123,11 +173,58 @@
 		return ret;
 	}
 
-	ipu_crtc->newfb = fb;
-	ipu_crtc->page_flip_event = event;
-	crtc->primary->fb = fb;
+	flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
+	if (!flip_work) {
+		ret = -ENOMEM;
+		goto put_vblank;
+	}
+	INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
+	flip_work->page_flip_event = event;
+
+	/* get BO backing the old framebuffer and take a reference */
+	flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
+	drm_gem_object_reference(flip_work->bo);
+
+	ipu_crtc->flip_work = flip_work;
+	/*
+	 * If the object has a DMABUF attached, we need to wait on its fences
+	 * if there are any.
+	 */
+	if (cma_obj->base.dma_buf) {
+		INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
+		flip_work->crtc = ipu_crtc;
+
+		ret = reservation_object_get_fences_rcu(
+				cma_obj->base.dma_buf->resv, &flip_work->excl,
+				&flip_work->shared_count, &flip_work->shared);
+
+		if (unlikely(ret)) {
+			DRM_ERROR("failed to get fences for buffer\n");
+			goto free_flip_work;
+		}
+
+		/* No need to queue the worker if there are no fences */
+		if (!flip_work->excl && !flip_work->shared_count) {
+			ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
+		} else {
+			ipu_crtc->flip_state = IPU_FLIP_PENDING;
+			queue_work(ipu_crtc->flip_queue,
+				   &flip_work->fence_work);
+		}
+	} else {
+		ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
+	}
 
 	return 0;
+
+free_flip_work:
+	drm_gem_object_unreference_unlocked(flip_work->bo);
+	kfree(flip_work);
+	ipu_crtc->flip_work = NULL;
+put_vblank:
+	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
+
+	return ret;
 }
 
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
@@ -211,12 +308,12 @@
 {
 	unsigned long flags;
 	struct drm_device *drm = ipu_crtc->base.dev;
+	struct ipu_flip_work *work = ipu_crtc->flip_work;
 
 	spin_lock_irqsave(&drm->event_lock, flags);
-	if (ipu_crtc->page_flip_event)
+	if (work->page_flip_event)
 		drm_crtc_send_vblank_event(&ipu_crtc->base,
-					   ipu_crtc->page_flip_event);
-	ipu_crtc->page_flip_event = NULL;
+					   work->page_flip_event);
 	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
 	spin_unlock_irqrestore(&drm->event_lock, flags);
 }
@@ -227,13 +324,15 @@
 
 	imx_drm_handle_vblank(ipu_crtc->imx_crtc);
 
-	if (ipu_crtc->newfb) {
+	if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
 		struct ipu_plane *plane = ipu_crtc->plane[0];
 
-		ipu_crtc->newfb = NULL;
 		ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
 				   plane->x, plane->y);
 		ipu_crtc_handle_pageflip(ipu_crtc);
+		queue_work(ipu_crtc->flip_queue,
+			   &ipu_crtc->flip_work->unref_work);
+		ipu_crtc->flip_state = IPU_FLIP_NONE;
 	}
 
 	return IRQ_HANDLED;
@@ -282,6 +381,10 @@
 
 static int ipu_enable_vblank(struct drm_crtc *crtc)
 {
+	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+
+	enable_irq(ipu_crtc->irq);
+
 	return 0;
 }
 
@@ -289,8 +392,7 @@
 {
 	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 
-	ipu_crtc->page_flip_event = NULL;
-	ipu_crtc->newfb = NULL;
+	disable_irq_nosync(ipu_crtc->irq);
 }
 
 static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
@@ -401,6 +503,10 @@
 		dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
 		goto err_put_plane_res;
 	}
+	/* Only enable IRQ when we actually need it to trigger work. */
+	disable_irq(ipu_crtc->irq);
+
+	ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
 
 	return 0;
 
@@ -443,6 +549,7 @@
 
 	imx_drm_remove_crtc(ipu_crtc->imx_crtc);
 
+	destroy_workqueue(ipu_crtc->flip_queue);
 	ipu_plane_put_resources(ipu_crtc->plane[0]);
 	ipu_put_resources(ipu_crtc);
 }
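The flip path is now a small state machine: NONE -> PENDING while dma-buf fences are outstanding, PENDING -> SUBMITTED once the fence worker has waited them out, and SUBMITTED -> NONE from the vblank IRQ after the new base address is latched. The fence snapshot-and-wait core, as used above (error handling elided; the shared array is allocated by the helper, so the caller frees it):

	struct fence *excl, **shared;
	unsigned shared_count;
	int i, ret;

	ret = reservation_object_get_fences_rcu(dma_buf->resv, &excl,
						&shared_count, &shared);
	if (!ret) {
		if (excl) {
			fence_wait(excl, false);
			fence_put(excl);
		}
		for (i = 0; i < shared_count; i++) {
			fence_wait(shared[i], false);
			fence_put(shared[i]);
		}
		kfree(shared);
	}
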
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 26bb1b6..5888278 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -339,7 +339,7 @@
 	}
 
 	if (crtc != plane->crtc)
-		dev_info(plane->dev->dev, "crtc change: %p -> %p\n",
+		dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
 				plane->crtc, crtc);
 	plane->crtc = crtc;
 
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 0ffef17..363e2c7 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -112,13 +112,6 @@
 		drm_panel_enable(imxpd->panel);
 }
 
-static bool imx_pd_encoder_mode_fixup(struct drm_encoder *encoder,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
 {
 	struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
@@ -166,7 +159,6 @@
 
 static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
 	.dpms = imx_pd_encoder_dpms,
-	.mode_fixup = imx_pd_encoder_mode_fixup,
 	.prepare = imx_pd_encoder_prepare,
 	.commit = imx_pd_encoder_commit,
 	.mode_set = imx_pd_encoder_mode_set,
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index dc13c48..14e64e0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -92,18 +92,6 @@
 	} while ((status & 0x01) && time_before(jiffies, timeout));
 }
 
-/*
- * The core passes the desired mode to the CRTC code to see whether any
- * CRTC-specific modifications need to be made to it. We're in a position
- * to just pass that straight through, so this does nothing
- */
-static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 #define P_ARRAY_SIZE 9
 
 static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
@@ -1410,7 +1398,6 @@
 static const struct drm_crtc_helper_funcs mga_helper_funcs = {
 	.disable = mga_crtc_disable,
 	.dpms = mga_crtc_dpms,
-	.mode_fixup = mga_crtc_mode_fixup,
 	.mode_set = mga_crtc_mode_set,
 	.mode_set_base = mga_crtc_mode_set_base,
 	.prepare = mga_crtc_prepare,
@@ -1479,13 +1466,6 @@
  * These functions are analogous to those in the CRTC code, but are intended
  * to handle any encoder-specific limitations
  */
-static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
-				   const struct drm_display_mode *mode,
-				   struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void mga_encoder_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
@@ -1515,7 +1495,6 @@
 
 static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
 	.dpms = mga_encoder_dpms,
-	.mode_fixup = mga_encoder_mode_fixup,
 	.mode_set = mga_encoder_mode_set,
 	.prepare = mga_encoder_prepare,
 	.commit = mga_encoder_commit,
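With mode_fixup made optional in the helper core earlier in this series, a callback that only returns true can simply be dropped, as done throughout these driver patches. A hook is still warranted when the encoder actually edits the adjusted mode, e.g. (hypothetical encoder):

	static bool sketch_encoder_mode_fixup(struct drm_encoder *encoder,
					      const struct drm_display_mode *mode,
					      struct drm_display_mode *adj_mode)
	{
		/* e.g. a DDR link that needs a doubled pixel clock */
		adj_mode->clock = mode->clock * 2;

		return true;
	}
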
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 065ad41..ddb4c9d 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -12,6 +12,7 @@
 	hdmi/hdmi_connector.o \
 	hdmi/hdmi_hdcp.o \
 	hdmi/hdmi_i2c.o \
+	hdmi/hdmi_phy.o \
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
 	hdmi/hdmi_phy_8x74.o \
@@ -52,6 +53,8 @@
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
+msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
 
 msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
 			mdp/mdp4/mdp4_dsi_encoder.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 9e2aceb..fee2429 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -9,16 +9,17 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 97dc1c6..27dabd5e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -9,16 +9,17 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -111,10 +112,14 @@
 	VFMT_8_8_SNORM = 53,
 	VFMT_8_8_8_SNORM = 54,
 	VFMT_8_8_8_8_SNORM = 55,
-	VFMT_10_10_10_2_UINT = 60,
-	VFMT_10_10_10_2_UNORM = 61,
-	VFMT_10_10_10_2_SINT = 62,
-	VFMT_10_10_10_2_SNORM = 63,
+	VFMT_10_10_10_2_UINT = 56,
+	VFMT_10_10_10_2_UNORM = 57,
+	VFMT_10_10_10_2_SINT = 58,
+	VFMT_10_10_10_2_SNORM = 59,
+	VFMT_2_10_10_10_UINT = 60,
+	VFMT_2_10_10_10_UNORM = 61,
+	VFMT_2_10_10_10_SINT = 62,
+	VFMT_2_10_10_10_SNORM = 63,
 };
 
 enum a3xx_tex_fmt {
@@ -138,10 +143,12 @@
 	TFMT_DXT1 = 36,
 	TFMT_DXT3 = 37,
 	TFMT_DXT5 = 38,
+	TFMT_2_10_10_10_UNORM = 40,
 	TFMT_10_10_10_2_UNORM = 41,
 	TFMT_9_9_9_E5_FLOAT = 42,
 	TFMT_11_11_10_FLOAT = 43,
 	TFMT_A8_UNORM = 44,
+	TFMT_L8_UNORM = 45,
 	TFMT_L8_A8_UNORM = 47,
 	TFMT_8_UNORM = 48,
 	TFMT_8_8_UNORM = 49,
@@ -183,6 +190,8 @@
 	TFMT_32_SINT = 92,
 	TFMT_32_32_SINT = 93,
 	TFMT_32_32_32_32_SINT = 95,
+	TFMT_2_10_10_10_UINT = 96,
+	TFMT_10_10_10_2_UINT = 97,
 	TFMT_ETC2_RG11_SNORM = 112,
 	TFMT_ETC2_RG11_UNORM = 113,
 	TFMT_ETC2_R11_SNORM = 114,
@@ -215,6 +224,9 @@
 	RB_R8_UINT = 14,
 	RB_R8_SINT = 15,
 	RB_R10G10B10A2_UNORM = 16,
+	RB_A2R10G10B10_UNORM = 17,
+	RB_R10G10B10A2_UINT = 18,
+	RB_A2R10G10B10_UINT = 19,
 	RB_A8_UNORM = 20,
 	RB_R8_UNORM = 21,
 	RB_R16_FLOAT = 24,
@@ -244,30 +256,273 @@
 	RB_R32G32B32A32_UINT = 59,
 };
 
-enum a3xx_sp_perfcounter_select {
-	SP_FS_CFLOW_INSTRUCTIONS = 12,
-	SP_FS_FULL_ALU_INSTRUCTIONS = 14,
-	SP0_ICL1_MISSES = 26,
-	SP_ALU_ACTIVE_CYCLES = 29,
+enum a3xx_cp_perfcounter_select {
+	CP_ALWAYS_COUNT = 0,
+	CP_AHB_PFPTRANS_WAIT = 3,
+	CP_AHB_NRTTRANS_WAIT = 6,
+	CP_CSF_NRT_READ_WAIT = 8,
+	CP_CSF_I1_FIFO_FULL = 9,
+	CP_CSF_I2_FIFO_FULL = 10,
+	CP_CSF_ST_FIFO_FULL = 11,
+	CP_RESERVED_12 = 12,
+	CP_CSF_RING_ROQ_FULL = 13,
+	CP_CSF_I1_ROQ_FULL = 14,
+	CP_CSF_I2_ROQ_FULL = 15,
+	CP_CSF_ST_ROQ_FULL = 16,
+	CP_RESERVED_17 = 17,
+	CP_MIU_TAG_MEM_FULL = 18,
+	CP_MIU_NRT_WRITE_STALLED = 22,
+	CP_MIU_NRT_READ_STALLED = 23,
+	CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
+	CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
+	CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
+	CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
+	CP_ME_MICRO_RB_STARVED = 30,
+	CP_AHB_RBBM_DWORD_SENT = 40,
+	CP_ME_BUSY_CLOCKS = 41,
+	CP_ME_WAIT_CONTEXT_AVAIL = 42,
+	CP_PFP_TYPE0_PACKET = 43,
+	CP_PFP_TYPE3_PACKET = 44,
+	CP_CSF_RB_WPTR_NEQ_RPTR = 45,
+	CP_CSF_I1_SIZE_NEQ_ZERO = 46,
+	CP_CSF_I2_SIZE_NEQ_ZERO = 47,
+	CP_CSF_RBI1I2_FETCHING = 48,
 };
 
-enum a3xx_rop_code {
-	ROP_CLEAR = 0,
-	ROP_NOR = 1,
-	ROP_AND_INVERTED = 2,
-	ROP_COPY_INVERTED = 3,
-	ROP_AND_REVERSE = 4,
-	ROP_INVERT = 5,
-	ROP_XOR = 6,
-	ROP_NAND = 7,
-	ROP_AND = 8,
-	ROP_EQUIV = 9,
-	ROP_NOOP = 10,
-	ROP_OR_INVERTED = 11,
-	ROP_COPY = 12,
-	ROP_OR_REVERSE = 13,
-	ROP_OR = 14,
-	ROP_SET = 15,
+enum a3xx_gras_tse_perfcounter_select {
+	GRAS_TSEPERF_INPUT_PRIM = 0,
+	GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
+	GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
+	GRAS_TSEPERF_CLIPPED_PRIM = 3,
+	GRAS_TSEPERF_NEW_PRIM = 4,
+	GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
+	GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
+	GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
+	GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
+	GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
+	GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
+	GRAS_TSEPERF_POST_CLIP_PRIM = 11,
+	GRAS_TSEPERF_WORKING_CYCLES = 12,
+	GRAS_TSEPERF_PC_STARVE = 13,
+	GRAS_TSERASPERF_STALL = 14,
+};
+
+enum a3xx_gras_ras_perfcounter_select {
+	GRAS_RASPERF_16X16_TILES = 0,
+	GRAS_RASPERF_8X8_TILES = 1,
+	GRAS_RASPERF_4X4_TILES = 2,
+	GRAS_RASPERF_WORKING_CYCLES = 3,
+	GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
+	GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
+	GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
+};
+
+enum a3xx_hlsq_perfcounter_select {
+	HLSQ_PERF_SP_VS_CONSTANT = 0,
+	HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
+	HLSQ_PERF_SP_FS_CONSTANT = 2,
+	HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
+	HLSQ_PERF_TP_STATE = 4,
+	HLSQ_PERF_QUADS = 5,
+	HLSQ_PERF_PIXELS = 6,
+	HLSQ_PERF_VERTICES = 7,
+	HLSQ_PERF_FS8_THREADS = 8,
+	HLSQ_PERF_FS16_THREADS = 9,
+	HLSQ_PERF_FS32_THREADS = 10,
+	HLSQ_PERF_VS8_THREADS = 11,
+	HLSQ_PERF_VS16_THREADS = 12,
+	HLSQ_PERF_SP_VS_DATA_BYTES = 13,
+	HLSQ_PERF_SP_FS_DATA_BYTES = 14,
+	HLSQ_PERF_ACTIVE_CYCLES = 15,
+	HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
+	HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
+	HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
+	HLSQ_PERF_STALL_CYCLES_UCHE = 19,
+	HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
+	HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
+	HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
+	HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
+	HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
+	HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
+	HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
+	HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
+	HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
+};
+
+enum a3xx_pc_perfcounter_select {
+	PC_PCPERF_VISIBILITY_STREAMS = 0,
+	PC_PCPERF_TOTAL_INSTANCES = 1,
+	PC_PCPERF_PRIMITIVES_PC_VPC = 2,
+	PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
+	PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
+	PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
+	PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
+	PC_PCPERF_VERTICES_TO_VFD = 7,
+	PC_PCPERF_REUSED_VERTICES = 8,
+	PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
+	PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
+	PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
+	PC_PCPERF_CYCLES_IS_WORKING = 12,
+};
+
+enum a3xx_rb_perfcounter_select {
+	RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
+	RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
+	RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
+	RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
+	RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
+	RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
+	RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
+	RB_RBPERF_RB_MARB_DATA = 7,
+	RB_RBPERF_SP_RB_QUAD = 8,
+	RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
+	RB_RBPERF_GMEM_CH0_READ = 10,
+	RB_RBPERF_GMEM_CH1_READ = 11,
+	RB_RBPERF_GMEM_CH0_WRITE = 12,
+	RB_RBPERF_GMEM_CH1_WRITE = 13,
+	RB_RBPERF_CP_CONTEXT_DONE = 14,
+	RB_RBPERF_CP_CACHE_FLUSH = 15,
+	RB_RBPERF_CP_ZPASS_DONE = 16,
+};
+
+enum a3xx_rbbm_perfcounter_select {
+	RBBM_ALAWYS_ON = 0,
+	RBBM_VBIF_BUSY = 1,
+	RBBM_TSE_BUSY = 2,
+	RBBM_RAS_BUSY = 3,
+	RBBM_PC_DCALL_BUSY = 4,
+	RBBM_PC_VSD_BUSY = 5,
+	RBBM_VFD_BUSY = 6,
+	RBBM_VPC_BUSY = 7,
+	RBBM_UCHE_BUSY = 8,
+	RBBM_VSC_BUSY = 9,
+	RBBM_HLSQ_BUSY = 10,
+	RBBM_ANY_RB_BUSY = 11,
+	RBBM_ANY_TEX_BUSY = 12,
+	RBBM_ANY_USP_BUSY = 13,
+	RBBM_ANY_MARB_BUSY = 14,
+	RBBM_ANY_ARB_BUSY = 15,
+	RBBM_AHB_STATUS_BUSY = 16,
+	RBBM_AHB_STATUS_STALLED = 17,
+	RBBM_AHB_STATUS_TXFR = 18,
+	RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+	RBBM_AHB_STATUS_TXFR_ERROR = 20,
+	RBBM_AHB_STATUS_LONG_STALL = 21,
+	RBBM_RBBM_STATUS_MASKED = 22,
+};
+
+enum a3xx_sp_perfcounter_select {
+	SP_LM_LOAD_INSTRUCTIONS = 0,
+	SP_LM_STORE_INSTRUCTIONS = 1,
+	SP_LM_ATOMICS = 2,
+	SP_UCHE_LOAD_INSTRUCTIONS = 3,
+	SP_UCHE_STORE_INSTRUCTIONS = 4,
+	SP_UCHE_ATOMICS = 5,
+	SP_VS_TEX_INSTRUCTIONS = 6,
+	SP_VS_CFLOW_INSTRUCTIONS = 7,
+	SP_VS_EFU_INSTRUCTIONS = 8,
+	SP_VS_FULL_ALU_INSTRUCTIONS = 9,
+	SP_VS_HALF_ALU_INSTRUCTIONS = 10,
+	SP_FS_TEX_INSTRUCTIONS = 11,
+	SP_FS_CFLOW_INSTRUCTIONS = 12,
+	SP_FS_EFU_INSTRUCTIONS = 13,
+	SP_FS_FULL_ALU_INSTRUCTIONS = 14,
+	SP_FS_HALF_ALU_INSTRUCTIONS = 15,
+	SP_FS_BARY_INSTRUCTIONS = 16,
+	SP_VS_INSTRUCTIONS = 17,
+	SP_FS_INSTRUCTIONS = 18,
+	SP_ADDR_LOCK_COUNT = 19,
+	SP_UCHE_READ_TRANS = 20,
+	SP_UCHE_WRITE_TRANS = 21,
+	SP_EXPORT_VPC_TRANS = 22,
+	SP_EXPORT_RB_TRANS = 23,
+	SP_PIXELS_KILLED = 24,
+	SP_ICL1_REQUESTS = 25,
+	SP_ICL1_MISSES = 26,
+	SP_ICL0_REQUESTS = 27,
+	SP_ICL0_MISSES = 28,
+	SP_ALU_ACTIVE_CYCLES = 29,
+	SP_EFU_ACTIVE_CYCLES = 30,
+	SP_STALL_CYCLES_BY_VPC = 31,
+	SP_STALL_CYCLES_BY_TP = 32,
+	SP_STALL_CYCLES_BY_UCHE = 33,
+	SP_STALL_CYCLES_BY_RB = 34,
+	SP_ACTIVE_CYCLES_ANY = 35,
+	SP_ACTIVE_CYCLES_ALL = 36,
+};
+
+enum a3xx_tp_perfcounter_select {
+	TPL1_TPPERF_L1_REQUESTS = 0,
+	TPL1_TPPERF_TP0_L1_REQUESTS = 1,
+	TPL1_TPPERF_TP0_L1_MISSES = 2,
+	TPL1_TPPERF_TP1_L1_REQUESTS = 3,
+	TPL1_TPPERF_TP1_L1_MISSES = 4,
+	TPL1_TPPERF_TP2_L1_REQUESTS = 5,
+	TPL1_TPPERF_TP2_L1_MISSES = 6,
+	TPL1_TPPERF_TP3_L1_REQUESTS = 7,
+	TPL1_TPPERF_TP3_L1_MISSES = 8,
+	TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
+	TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
+	TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
+	TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
+	TPL1_TPPERF_BILINEAR_OPS = 13,
+	TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
+	TPL1_TPPERF_QUADQUADS_SHADOW = 15,
+	TPL1_TPPERF_QUADS_ARRAY = 16,
+	TPL1_TPPERF_QUADS_PROJECTION = 17,
+	TPL1_TPPERF_QUADS_GRADIENT = 18,
+	TPL1_TPPERF_QUADS_1D2D = 19,
+	TPL1_TPPERF_QUADS_3DCUBE = 20,
+	TPL1_TPPERF_ZERO_LOD = 21,
+	TPL1_TPPERF_OUTPUT_TEXELS = 22,
+	TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
+	TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
+	TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
+	TPL1_TPPERF_LATENCY = 26,
+	TPL1_TPPERF_LATENCY_TRANS = 27,
+};
+
+enum a3xx_vfd_perfcounter_select {
+	VFD_PERF_UCHE_BYTE_FETCHED = 0,
+	VFD_PERF_UCHE_TRANS = 1,
+	VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
+	VFD_PERF_FETCH_INSTRUCTIONS = 3,
+	VFD_PERF_DECODE_INSTRUCTIONS = 4,
+	VFD_PERF_ACTIVE_CYCLES = 5,
+	VFD_PERF_STALL_CYCLES_UCHE = 6,
+	VFD_PERF_STALL_CYCLES_HLSQ = 7,
+	VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
+	VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
+};
+
+enum a3xx_vpc_perfcounter_select {
+	VPC_PERF_SP_LM_PRIMITIVES = 0,
+	VPC_PERF_COMPONENTS_FROM_SP = 1,
+	VPC_PERF_SP_LM_COMPONENTS = 2,
+	VPC_PERF_ACTIVE_CYCLES = 3,
+	VPC_PERF_STALL_CYCLES_LM = 4,
+	VPC_PERF_STALL_CYCLES_RAS = 5,
+};
+
+enum a3xx_uche_perfcounter_select {
+	UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
+	UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
+	UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
+	UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
+	UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
+	UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
+	UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
+	UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
+	UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
+	UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
+	UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
+	UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
+	UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
+	UCHE_UCHEPERF_EVICTS = 16,
+	UCHE_UCHEPERF_FLUSHES = 17,
+	UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
+	UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
+	UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
 };
 
 enum a3xx_rb_blend_opcode {
@@ -1429,15 +1684,23 @@
 #define REG_A3XX_PC_RESTART_INDEX				0x000021ed
 
 #define REG_A3XX_HLSQ_CONTROL_0_REG				0x00002200
-#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK		0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK		0x00000030
 #define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT		4
 static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
 {
 	return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
 }
 #define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE		0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE			0x00000100
 #define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART			0x00000200
 #define A3XX_HLSQ_CONTROL_0_REG_RESERVED2			0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK	0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT	12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX			0x02000000
 #define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE			0x04000000
 #define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK			0x08000000
 #define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT		27
@@ -1451,17 +1714,39 @@
 #define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT			0x80000000
 
 #define REG_A3XX_HLSQ_CONTROL_1_REG				0x00002201
-#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK		0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK		0x000000c0
 #define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT		6
 static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
 {
 	return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
 }
 #define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE		0x00000100
-#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1			0x00000200
-#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD				0x02000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK		0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT		16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK		0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT		24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
 
 #define REG_A3XX_HLSQ_CONTROL_2_REG				0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK		0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT		2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK		0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT		18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
 #define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK	0xfc000000
 #define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT	26
 static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
@@ -1478,13 +1763,13 @@
 }
 
 #define REG_A3XX_HLSQ_VS_CONTROL_REG				0x00002204
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK		0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK		0x000003ff
 #define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT		0
 static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
 {
 	return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
 }
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK		0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK		0x001ff000
 #define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT	12
 static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
 {
@@ -1498,13 +1783,13 @@
 }
 
 #define REG_A3XX_HLSQ_FS_CONTROL_REG				0x00002205
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK		0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK		0x000003ff
 #define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT		0
 static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
 {
 	return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
 }
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK		0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK		0x001ff000
 #define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT	12
 static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
 {
@@ -1518,13 +1803,13 @@
 }
 
 #define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG			0x00002206
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK	0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK	0x000001ff
 #define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT	0
 static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
 {
 	return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
 }
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK	0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK	0x01ff0000
 #define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT	16
 static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
 {
@@ -1532,13 +1817,13 @@
 }
 
 #define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG			0x00002207
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK	0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK	0x000001ff
 #define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT	0
 static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
 {
 	return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
 }
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK	0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK	0x01ff0000
 #define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT	16
 static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
 {
@@ -1620,12 +1905,24 @@
 }
 
 #define REG_A3XX_VFD_CONTROL_1					0x00002241
-#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK			0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK			0x0000000f
 #define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT			0
 static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
 {
 	return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
 }
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK			0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT			4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK			0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT			8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
 #define A3XX_VFD_CONTROL_1_REGID4VTX__MASK			0x00ff0000
 #define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT			16
 static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
@@ -2008,24 +2305,19 @@
 	return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
 }
 #define A3XX_SP_VS_CTRL_REG0_CACHEINVALID			0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE				0x00000008
 #define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK		0x000003f0
 #define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT		4
 static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0000fc00
 #define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT		10
 static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
 }
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK		0x000c0000
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT		18
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
-	return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
 #define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK			0x00100000
 #define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT			20
 static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2033,8 +2325,6 @@
 	return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
 }
 #define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE			0x00200000
-#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE			0x00400000
-#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE			0x00800000
 #define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK			0xff000000
 #define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT			24
 static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -2075,7 +2365,8 @@
 {
 	return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
 }
-#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK		0xfff00000
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE				0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK		0x01f00000
 #define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT		20
 static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
 {
@@ -2085,24 +2376,26 @@
 static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
 
 static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
-#define A3XX_SP_VS_OUT_REG_A_REGID__MASK			0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK			0x000000ff
 #define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT			0
 static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
 }
+#define A3XX_SP_VS_OUT_REG_A_HALF				0x00000100
 #define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK			0x00001e00
 #define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT			9
 static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
 }
-#define A3XX_SP_VS_OUT_REG_B_REGID__MASK			0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK			0x00ff0000
 #define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT			16
 static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
 }
+#define A3XX_SP_VS_OUT_REG_B_HALF				0x01000000
 #define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK			0x1e000000
 #define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT			25
 static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
@@ -2113,25 +2406,25 @@
 static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
 
 static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK			0x0000007f
 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT			0
 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
 }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK			0x00007f00
 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT			8
 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
 }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK			0x007f0000
 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT			16
 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
 {
 	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
 }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK			0x7f000000
 #define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT			24
 static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
 {
@@ -2139,6 +2432,12 @@
 }
 
 #define REG_A3XX_SP_VS_OBJ_OFFSET_REG				0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK	0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT	0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
 #define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
 static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2155,8 +2454,38 @@
 #define REG_A3XX_SP_VS_OBJ_START_REG				0x000022d5
 
 #define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG			0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK	0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT	0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK	0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT	8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK	0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT	24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
 
 #define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG				0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK		0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT		0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK	0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT	5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+	return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
 
 #define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG				0x000022d8
 
@@ -2182,24 +2511,22 @@
 	return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
 }
 #define A3XX_SP_FS_CTRL_REG0_CACHEINVALID			0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE				0x00000008
 #define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK		0x000003f0
 #define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT		4
 static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK		0x0000fc00
 #define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT		10
 static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
 	return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
 }
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK		0x000c0000
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT		18
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
-	return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE			0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP			0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED				0x00080000
 #define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK			0x00100000
 #define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT			20
 static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2235,7 +2562,7 @@
 {
 	return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
 }
-#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK		0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK		0x7f000000
 #define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT		24
 static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
 {
@@ -2243,6 +2570,12 @@
 }
 
 #define REG_A3XX_SP_FS_OBJ_OFFSET_REG				0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK	0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT	0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
 #define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
 static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2259,8 +2592,38 @@
 #define REG_A3XX_SP_FS_OBJ_START_REG				0x000022e3
 
 #define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG			0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK	0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT	0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK	0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT	8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK	0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT	24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
 
 #define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG				0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK		0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT		0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK	0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT	5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+	return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
 
 #define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG				0x000022e6
 
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 99de827..3220b91 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -9,16 +9,17 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -47,11 +48,13 @@
 	RB4_R8_UNORM = 2,
 	RB4_R4G4B4A4_UNORM = 8,
 	RB4_R5G5B5A1_UNORM = 10,
-	RB4_R5G6R5_UNORM = 14,
+	RB4_R5G6B5_UNORM = 14,
 	RB4_R8G8_UNORM = 15,
 	RB4_R8G8_SNORM = 16,
 	RB4_R8G8_UINT = 17,
 	RB4_R8G8_SINT = 18,
+	RB4_R16_UNORM = 19,
+	RB4_R16_SNORM = 20,
 	RB4_R16_FLOAT = 21,
 	RB4_R16_UINT = 22,
 	RB4_R16_SINT = 23,
@@ -63,12 +66,16 @@
 	RB4_R10G10B10A2_UNORM = 31,
 	RB4_R10G10B10A2_UINT = 34,
 	RB4_R11G11B10_FLOAT = 39,
+	RB4_R16G16_UNORM = 40,
+	RB4_R16G16_SNORM = 41,
 	RB4_R16G16_FLOAT = 42,
 	RB4_R16G16_UINT = 43,
 	RB4_R16G16_SINT = 44,
 	RB4_R32_FLOAT = 45,
 	RB4_R32_UINT = 46,
 	RB4_R32_SINT = 47,
+	RB4_R16G16B16A16_UNORM = 52,
+	RB4_R16G16B16A16_SNORM = 53,
 	RB4_R16G16B16A16_FLOAT = 54,
 	RB4_R16G16B16A16_UINT = 55,
 	RB4_R16G16B16A16_SINT = 56,
@@ -106,6 +113,7 @@
 	VFMT4_32_32_FIXED = 10,
 	VFMT4_32_32_32_FIXED = 11,
 	VFMT4_32_32_32_32_FIXED = 12,
+	VFMT4_11_11_10_FLOAT = 13,
 	VFMT4_16_SINT = 16,
 	VFMT4_16_16_SINT = 17,
 	VFMT4_16_16_16_SINT = 18,
@@ -146,52 +154,76 @@
 	VFMT4_8_8_SNORM = 53,
 	VFMT4_8_8_8_SNORM = 54,
 	VFMT4_8_8_8_8_SNORM = 55,
-	VFMT4_10_10_10_2_UINT = 60,
-	VFMT4_10_10_10_2_UNORM = 61,
-	VFMT4_10_10_10_2_SINT = 62,
-	VFMT4_10_10_10_2_SNORM = 63,
+	VFMT4_10_10_10_2_UINT = 56,
+	VFMT4_10_10_10_2_UNORM = 57,
+	VFMT4_10_10_10_2_SINT = 58,
+	VFMT4_10_10_10_2_SNORM = 59,
+	VFMT4_2_10_10_10_UINT = 60,
+	VFMT4_2_10_10_10_UNORM = 61,
+	VFMT4_2_10_10_10_SINT = 62,
+	VFMT4_2_10_10_10_SNORM = 63,
 };
 
 enum a4xx_tex_fmt {
-	TFMT4_5_6_5_UNORM = 11,
-	TFMT4_5_5_5_1_UNORM = 10,
-	TFMT4_4_4_4_4_UNORM = 8,
-	TFMT4_X8Z24_UNORM = 71,
-	TFMT4_10_10_10_2_UNORM = 33,
 	TFMT4_A8_UNORM = 3,
-	TFMT4_L8_A8_UNORM = 13,
 	TFMT4_8_UNORM = 4,
-	TFMT4_8_8_UNORM = 14,
-	TFMT4_8_8_8_8_UNORM = 28,
 	TFMT4_8_SNORM = 5,
-	TFMT4_8_8_SNORM = 15,
-	TFMT4_8_8_8_8_SNORM = 29,
 	TFMT4_8_UINT = 6,
-	TFMT4_8_8_UINT = 16,
-	TFMT4_8_8_8_8_UINT = 30,
 	TFMT4_8_SINT = 7,
+	TFMT4_4_4_4_4_UNORM = 8,
+	TFMT4_5_5_5_1_UNORM = 9,
+	TFMT4_5_6_5_UNORM = 11,
+	TFMT4_L8_A8_UNORM = 13,
+	TFMT4_8_8_UNORM = 14,
+	TFMT4_8_8_SNORM = 15,
+	TFMT4_8_8_UINT = 16,
 	TFMT4_8_8_SINT = 17,
-	TFMT4_8_8_8_8_SINT = 31,
-	TFMT4_16_UINT = 21,
-	TFMT4_16_16_UINT = 41,
-	TFMT4_16_16_16_16_UINT = 54,
-	TFMT4_16_SINT = 22,
-	TFMT4_16_16_SINT = 42,
-	TFMT4_16_16_16_16_SINT = 55,
-	TFMT4_32_UINT = 44,
-	TFMT4_32_32_UINT = 57,
-	TFMT4_32_32_32_32_UINT = 64,
-	TFMT4_32_SINT = 45,
-	TFMT4_32_32_SINT = 58,
-	TFMT4_32_32_32_32_SINT = 65,
+	TFMT4_16_UNORM = 18,
+	TFMT4_16_SNORM = 19,
 	TFMT4_16_FLOAT = 20,
-	TFMT4_16_16_FLOAT = 40,
-	TFMT4_16_16_16_16_FLOAT = 53,
-	TFMT4_32_FLOAT = 43,
-	TFMT4_32_32_FLOAT = 56,
-	TFMT4_32_32_32_32_FLOAT = 63,
+	TFMT4_16_UINT = 21,
+	TFMT4_16_SINT = 22,
+	TFMT4_8_8_8_8_UNORM = 28,
+	TFMT4_8_8_8_8_SNORM = 29,
+	TFMT4_8_8_8_8_UINT = 30,
+	TFMT4_8_8_8_8_SINT = 31,
 	TFMT4_9_9_9_E5_FLOAT = 32,
+	TFMT4_10_10_10_2_UNORM = 33,
+	TFMT4_10_10_10_2_UINT = 34,
 	TFMT4_11_11_10_FLOAT = 37,
+	TFMT4_16_16_UNORM = 38,
+	TFMT4_16_16_SNORM = 39,
+	TFMT4_16_16_FLOAT = 40,
+	TFMT4_16_16_UINT = 41,
+	TFMT4_16_16_SINT = 42,
+	TFMT4_32_FLOAT = 43,
+	TFMT4_32_UINT = 44,
+	TFMT4_32_SINT = 45,
+	TFMT4_16_16_16_16_UNORM = 51,
+	TFMT4_16_16_16_16_SNORM = 52,
+	TFMT4_16_16_16_16_FLOAT = 53,
+	TFMT4_16_16_16_16_UINT = 54,
+	TFMT4_16_16_16_16_SINT = 55,
+	TFMT4_32_32_FLOAT = 56,
+	TFMT4_32_32_UINT = 57,
+	TFMT4_32_32_SINT = 58,
+	TFMT4_32_32_32_FLOAT = 59,
+	TFMT4_32_32_32_UINT = 60,
+	TFMT4_32_32_32_SINT = 61,
+	TFMT4_32_32_32_32_FLOAT = 63,
+	TFMT4_32_32_32_32_UINT = 64,
+	TFMT4_32_32_32_32_SINT = 65,
+	TFMT4_X8Z24_UNORM = 71,
+	TFMT4_DXT1 = 86,
+	TFMT4_DXT3 = 87,
+	TFMT4_DXT5 = 88,
+	TFMT4_RGTC1_UNORM = 90,
+	TFMT4_RGTC1_SNORM = 91,
+	TFMT4_RGTC2_UNORM = 94,
+	TFMT4_RGTC2_SNORM = 95,
+	TFMT4_BPTC_UFLOAT = 97,
+	TFMT4_BPTC_FLOAT = 98,
+	TFMT4_BPTC = 99,
 	TFMT4_ATC_RGB = 100,
 	TFMT4_ATC_RGBA_EXPLICIT = 101,
 	TFMT4_ATC_RGBA_INTERPOLATED = 102,
@@ -240,6 +272,545 @@
 	EVEN_SPACING = 3,
 };
 
+enum a4xx_ccu_perfcounter_select {
+	CCU_BUSY_CYCLES = 0,
+	CCU_RB_DEPTH_RETURN_STALL = 2,
+	CCU_RB_COLOR_RETURN_STALL = 3,
+	CCU_DEPTH_BLOCKS = 6,
+	CCU_COLOR_BLOCKS = 7,
+	CCU_DEPTH_BLOCK_HIT = 8,
+	CCU_COLOR_BLOCK_HIT = 9,
+	CCU_DEPTH_FLAG1_COUNT = 10,
+	CCU_DEPTH_FLAG2_COUNT = 11,
+	CCU_DEPTH_FLAG3_COUNT = 12,
+	CCU_DEPTH_FLAG4_COUNT = 13,
+	CCU_COLOR_FLAG1_COUNT = 14,
+	CCU_COLOR_FLAG2_COUNT = 15,
+	CCU_COLOR_FLAG3_COUNT = 16,
+	CCU_COLOR_FLAG4_COUNT = 17,
+	CCU_PARTIAL_BLOCK_READ = 18,
+};
+
+enum a4xx_cp_perfcounter_select {
+	CP_ALWAYS_COUNT = 0,
+	CP_BUSY = 1,
+	CP_PFP_IDLE = 2,
+	CP_PFP_BUSY_WORKING = 3,
+	CP_PFP_STALL_CYCLES_ANY = 4,
+	CP_PFP_STARVE_CYCLES_ANY = 5,
+	CP_PFP_STARVED_PER_LOAD_ADDR = 6,
+	CP_PFP_STALLED_PER_STORE_ADDR = 7,
+	CP_PFP_PC_PROFILE = 8,
+	CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+	CP_PFP_COND_INDIRECT_DISCARDED = 10,
+	CP_LONG_RESUMPTIONS = 11,
+	CP_RESUME_CYCLES = 12,
+	CP_RESUME_TO_BOUNDARY_CYCLES = 13,
+	CP_LONG_PREEMPTIONS = 14,
+	CP_PREEMPT_CYCLES = 15,
+	CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
+	CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
+	CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
+	CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
+	CP_ME_FIFO_FULL_ME_BUSY = 20,
+	CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
+	CP_ME_WAITING_FOR_PACKETS = 22,
+	CP_ME_BUSY_WORKING = 23,
+	CP_ME_STARVE_CYCLES_ANY = 24,
+	CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
+	CP_ME_STALL_CYCLES_PER_PROFILE = 26,
+	CP_ME_PC_PROFILE = 27,
+	CP_RCIU_FIFO_EMPTY = 28,
+	CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
+	CP_RCIU_FIFO_FULL = 30,
+	CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
+	CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
+	CP_RCIU_FIFO_FULL_OTHER = 33,
+	CP_AHB_IDLE = 34,
+	CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
+	CP_AHB_STALL_ON_GRANT_SPLIT = 36,
+	CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
+	CP_AHB_BUSY_WORKING = 38,
+	CP_AHB_BUSY_STALL_ON_HRDY = 39,
+	CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
+};
+
+enum a4xx_gras_ras_perfcounter_select {
+	RAS_SUPER_TILES = 0,
+	RAS_8X8_TILES = 1,
+	RAS_4X4_TILES = 2,
+	RAS_BUSY_CYCLES = 3,
+	RAS_STALL_CYCLES_BY_RB = 4,
+	RAS_STALL_CYCLES_BY_VSC = 5,
+	RAS_STARVE_CYCLES_BY_TSE = 6,
+	RAS_SUPERTILE_CYCLES = 7,
+	RAS_TILE_CYCLES = 8,
+	RAS_FULLY_COVERED_SUPER_TILES = 9,
+	RAS_FULLY_COVERED_8X8_TILES = 10,
+	RAS_4X4_PRIM = 11,
+	RAS_8X4_4X8_PRIM = 12,
+	RAS_8X8_PRIM = 13,
+};
+
+enum a4xx_gras_tse_perfcounter_select {
+	TSE_INPUT_PRIM = 0,
+	TSE_INPUT_NULL_PRIM = 1,
+	TSE_TRIVIAL_REJ_PRIM = 2,
+	TSE_CLIPPED_PRIM = 3,
+	TSE_NEW_PRIM = 4,
+	TSE_ZERO_AREA_PRIM = 5,
+	TSE_FACENESS_CULLED_PRIM = 6,
+	TSE_ZERO_PIXEL_PRIM = 7,
+	TSE_OUTPUT_NULL_PRIM = 8,
+	TSE_OUTPUT_VISIBLE_PRIM = 9,
+	TSE_PRE_CLIP_PRIM = 10,
+	TSE_POST_CLIP_PRIM = 11,
+	TSE_BUSY_CYCLES = 12,
+	TSE_PC_STARVE = 13,
+	TSE_RAS_STALL = 14,
+	TSE_STALL_BARYPLANE_FIFO_FULL = 15,
+	TSE_STALL_ZPLANE_FIFO_FULL = 16,
+};
+
+enum a4xx_hlsq_perfcounter_select {
+	HLSQ_SP_VS_STAGE_CONSTANT = 0,
+	HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
+	HLSQ_SP_FS_STAGE_CONSTANT = 2,
+	HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
+	HLSQ_TP_STATE = 4,
+	HLSQ_QUADS = 5,
+	HLSQ_PIXELS = 6,
+	HLSQ_VERTICES = 7,
+	HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
+	HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
+	HLSQ_BUSY_CYCLES = 15,
+	HLSQ_STALL_CYCLES_SP_STATE = 16,
+	HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
+	HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
+	HLSQ_STALL_CYCLES_UCHE = 19,
+	HLSQ_RBBM_LOAD_CYCLES = 20,
+	HLSQ_DI_TO_VS_START_SP = 21,
+	HLSQ_DI_TO_FS_START_SP = 22,
+	HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
+	HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
+	HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
+	HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
+	HLSQ_UCHE_LATENCY_CYCLES = 27,
+	HLSQ_UCHE_LATENCY_COUNT = 28,
+	HLSQ_STARVE_CYCLES_VFD = 29,
+};
+
+enum a4xx_pc_perfcounter_select {
+	PC_VIS_STREAMS_LOADED = 0,
+	PC_VPC_PRIMITIVES = 2,
+	PC_DEAD_PRIM = 3,
+	PC_LIVE_PRIM = 4,
+	PC_DEAD_DRAWCALLS = 5,
+	PC_LIVE_DRAWCALLS = 6,
+	PC_VERTEX_MISSES = 7,
+	PC_STALL_CYCLES_VFD = 9,
+	PC_STALL_CYCLES_TSE = 10,
+	PC_STALL_CYCLES_UCHE = 11,
+	PC_WORKING_CYCLES = 12,
+	PC_IA_VERTICES = 13,
+	PC_GS_PRIMITIVES = 14,
+	PC_HS_INVOCATIONS = 15,
+	PC_DS_INVOCATIONS = 16,
+	PC_DS_PRIMITIVES = 17,
+	PC_STARVE_CYCLES_FOR_INDEX = 20,
+	PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
+	PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
+	PC_STALL_CYCLES_TESS = 23,
+	PC_STARVE_CYCLES_FOR_POSITION = 24,
+	PC_MODE0_DRAWCALL = 25,
+	PC_MODE1_DRAWCALL = 26,
+	PC_MODE2_DRAWCALL = 27,
+	PC_MODE3_DRAWCALL = 28,
+	PC_MODE4_DRAWCALL = 29,
+	PC_PREDICATED_DEAD_DRAWCALL = 30,
+	PC_STALL_CYCLES_BY_TSE_ONLY = 31,
+	PC_STALL_CYCLES_BY_VPC_ONLY = 32,
+	PC_VPC_POS_DATA_TRANSACTION = 33,
+	PC_BUSY_CYCLES = 34,
+	PC_STARVE_CYCLES_DI = 35,
+	PC_STALL_CYCLES_VPC = 36,
+	TESS_WORKING_CYCLES = 37,
+	TESS_NUM_CYCLES_SETUP_WORKING = 38,
+	TESS_NUM_CYCLES_PTGEN_WORKING = 39,
+	TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
+	TESS_BUSY_CYCLES = 41,
+	TESS_STARVE_CYCLES_PC = 42,
+	TESS_STALL_CYCLES_PC = 43,
+};
+
+enum a4xx_pwr_perfcounter_select {
+	PWR_CORE_CLOCK_CYCLES = 0,
+	PWR_BUSY_CLOCK_CYCLES = 1,
+};
+
+enum a4xx_rb_perfcounter_select {
+	RB_BUSY_CYCLES = 0,
+	RB_BUSY_CYCLES_BINNING = 1,
+	RB_BUSY_CYCLES_RENDERING = 2,
+	RB_BUSY_CYCLES_RESOLVE = 3,
+	RB_STARVE_CYCLES_BY_SP = 4,
+	RB_STARVE_CYCLES_BY_RAS = 5,
+	RB_STARVE_CYCLES_BY_MARB = 6,
+	RB_STALL_CYCLES_BY_MARB = 7,
+	RB_STALL_CYCLES_BY_HLSQ = 8,
+	RB_RB_RB_MARB_DATA = 9,
+	RB_SP_RB_QUAD = 10,
+	RB_RAS_RB_Z_QUADS = 11,
+	RB_GMEM_CH0_READ = 12,
+	RB_GMEM_CH1_READ = 13,
+	RB_GMEM_CH0_WRITE = 14,
+	RB_GMEM_CH1_WRITE = 15,
+	RB_CP_CONTEXT_DONE = 16,
+	RB_CP_CACHE_FLUSH = 17,
+	RB_CP_ZPASS_DONE = 18,
+	RB_STALL_FIFO0_FULL = 19,
+	RB_STALL_FIFO1_FULL = 20,
+	RB_STALL_FIFO2_FULL = 21,
+	RB_STALL_FIFO3_FULL = 22,
+	RB_RB_HLSQ_TRANSACTIONS = 23,
+	RB_Z_READ = 24,
+	RB_Z_WRITE = 25,
+	RB_C_READ = 26,
+	RB_C_WRITE = 27,
+	RB_C_READ_LATENCY = 28,
+	RB_Z_READ_LATENCY = 29,
+	RB_STALL_BY_UCHE = 30,
+	RB_MARB_UCHE_TRANSACTIONS = 31,
+	RB_CACHE_STALL_MISS = 32,
+	RB_CACHE_STALL_FIFO_FULL = 33,
+	RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
+	RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
+	RB_SAMPLER_UNITS_ACTIVE = 36,
+	RB_TOTAL_PASS = 38,
+	RB_Z_PASS = 39,
+	RB_Z_FAIL = 40,
+	RB_S_FAIL = 41,
+	RB_POWER0 = 42,
+	RB_POWER1 = 43,
+	RB_POWER2 = 44,
+	RB_POWER3 = 45,
+	RB_POWER4 = 46,
+	RB_POWER5 = 47,
+	RB_POWER6 = 48,
+	RB_POWER7 = 49,
+};
+
+enum a4xx_rbbm_perfcounter_select {
+	RBBM_ALWAYS_ON = 0,
+	RBBM_VBIF_BUSY = 1,
+	RBBM_TSE_BUSY = 2,
+	RBBM_RAS_BUSY = 3,
+	RBBM_PC_DCALL_BUSY = 4,
+	RBBM_PC_VSD_BUSY = 5,
+	RBBM_VFD_BUSY = 6,
+	RBBM_VPC_BUSY = 7,
+	RBBM_UCHE_BUSY = 8,
+	RBBM_VSC_BUSY = 9,
+	RBBM_HLSQ_BUSY = 10,
+	RBBM_ANY_RB_BUSY = 11,
+	RBBM_ANY_TPL1_BUSY = 12,
+	RBBM_ANY_SP_BUSY = 13,
+	RBBM_ANY_MARB_BUSY = 14,
+	RBBM_ANY_ARB_BUSY = 15,
+	RBBM_AHB_STATUS_BUSY = 16,
+	RBBM_AHB_STATUS_STALLED = 17,
+	RBBM_AHB_STATUS_TXFR = 18,
+	RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+	RBBM_AHB_STATUS_TXFR_ERROR = 20,
+	RBBM_AHB_STATUS_LONG_STALL = 21,
+	RBBM_STATUS_MASKED = 22,
+	RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
+	RBBM_TESS_BUSY = 24,
+	RBBM_COM_BUSY = 25,
+	RBBM_DCOM_BUSY = 32,
+	RBBM_ANY_CCU_BUSY = 33,
+	RBBM_DPM_BUSY = 34,
+};
+
+enum a4xx_sp_perfcounter_select {
+	SP_LM_LOAD_INSTRUCTIONS = 0,
+	SP_LM_STORE_INSTRUCTIONS = 1,
+	SP_LM_ATOMICS = 2,
+	SP_GM_LOAD_INSTRUCTIONS = 3,
+	SP_GM_STORE_INSTRUCTIONS = 4,
+	SP_GM_ATOMICS = 5,
+	SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
+	SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
+	SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
+	SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
+	SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
+	SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
+	SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
+	SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
+	SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
+	SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
+	SP_VS_INSTRUCTIONS = 17,
+	SP_FS_INSTRUCTIONS = 18,
+	SP_ADDR_LOCK_COUNT = 19,
+	SP_UCHE_READ_TRANS = 20,
+	SP_UCHE_WRITE_TRANS = 21,
+	SP_EXPORT_VPC_TRANS = 22,
+	SP_EXPORT_RB_TRANS = 23,
+	SP_PIXELS_KILLED = 24,
+	SP_ICL1_REQUESTS = 25,
+	SP_ICL1_MISSES = 26,
+	SP_ICL0_REQUESTS = 27,
+	SP_ICL0_MISSES = 28,
+	SP_ALU_WORKING_CYCLES = 29,
+	SP_EFU_WORKING_CYCLES = 30,
+	SP_STALL_CYCLES_BY_VPC = 31,
+	SP_STALL_CYCLES_BY_TP = 32,
+	SP_STALL_CYCLES_BY_UCHE = 33,
+	SP_STALL_CYCLES_BY_RB = 34,
+	SP_BUSY_CYCLES = 35,
+	SP_HS_INSTRUCTIONS = 36,
+	SP_DS_INSTRUCTIONS = 37,
+	SP_GS_INSTRUCTIONS = 38,
+	SP_CS_INSTRUCTIONS = 39,
+	SP_SCHEDULER_NON_WORKING = 40,
+	SP_WAVE_CONTEXTS = 41,
+	SP_WAVE_CONTEXT_CYCLES = 42,
+	SP_POWER0 = 43,
+	SP_POWER1 = 44,
+	SP_POWER2 = 45,
+	SP_POWER3 = 46,
+	SP_POWER4 = 47,
+	SP_POWER5 = 48,
+	SP_POWER6 = 49,
+	SP_POWER7 = 50,
+	SP_POWER8 = 51,
+	SP_POWER9 = 52,
+	SP_POWER10 = 53,
+	SP_POWER11 = 54,
+	SP_POWER12 = 55,
+	SP_POWER13 = 56,
+	SP_POWER14 = 57,
+	SP_POWER15 = 58,
+};
+
+enum a4xx_tp_perfcounter_select {
+	TP_L1_REQUESTS = 0,
+	TP_L1_MISSES = 1,
+	TP_QUADS_OFFSET = 8,
+	TP_QUAD_SHADOW = 9,
+	TP_QUADS_ARRAY = 10,
+	TP_QUADS_GRADIENT = 11,
+	TP_QUADS_1D2D = 12,
+	TP_QUADS_3DCUBE = 13,
+	TP_BUSY_CYCLES = 16,
+	TP_STALL_CYCLES_BY_ARB = 17,
+	TP_STATE_CACHE_REQUESTS = 20,
+	TP_STATE_CACHE_MISSES = 21,
+	TP_POWER0 = 22,
+	TP_POWER1 = 23,
+	TP_POWER2 = 24,
+	TP_POWER3 = 25,
+	TP_POWER4 = 26,
+	TP_POWER5 = 27,
+	TP_POWER6 = 28,
+	TP_POWER7 = 29,
+};
+
+enum a4xx_uche_perfcounter_select {
+	UCHE_VBIF_READ_BEATS_TP = 0,
+	UCHE_VBIF_READ_BEATS_VFD = 1,
+	UCHE_VBIF_READ_BEATS_HLSQ = 2,
+	UCHE_VBIF_READ_BEATS_MARB = 3,
+	UCHE_VBIF_READ_BEATS_SP = 4,
+	UCHE_READ_REQUESTS_TP = 5,
+	UCHE_READ_REQUESTS_VFD = 6,
+	UCHE_READ_REQUESTS_HLSQ = 7,
+	UCHE_READ_REQUESTS_MARB = 8,
+	UCHE_READ_REQUESTS_SP = 9,
+	UCHE_WRITE_REQUESTS_MARB = 10,
+	UCHE_WRITE_REQUESTS_SP = 11,
+	UCHE_TAG_CHECK_FAILS = 12,
+	UCHE_EVICTS = 13,
+	UCHE_FLUSHES = 14,
+	UCHE_VBIF_LATENCY_CYCLES = 15,
+	UCHE_VBIF_LATENCY_SAMPLES = 16,
+	UCHE_BUSY_CYCLES = 17,
+	UCHE_VBIF_READ_BEATS_PC = 18,
+	UCHE_READ_REQUESTS_PC = 19,
+	UCHE_WRITE_REQUESTS_VPC = 20,
+	UCHE_STALL_BY_VBIF = 21,
+	UCHE_WRITE_REQUESTS_VSC = 22,
+	UCHE_POWER0 = 23,
+	UCHE_POWER1 = 24,
+	UCHE_POWER2 = 25,
+	UCHE_POWER3 = 26,
+	UCHE_POWER4 = 27,
+	UCHE_POWER5 = 28,
+	UCHE_POWER6 = 29,
+	UCHE_POWER7 = 30,
+};
+
+enum a4xx_vbif_perfcounter_select {
+	AXI_READ_REQUESTS_ID_0 = 0,
+	AXI_READ_REQUESTS_ID_1 = 1,
+	AXI_READ_REQUESTS_ID_2 = 2,
+	AXI_READ_REQUESTS_ID_3 = 3,
+	AXI_READ_REQUESTS_ID_4 = 4,
+	AXI_READ_REQUESTS_ID_5 = 5,
+	AXI_READ_REQUESTS_ID_6 = 6,
+	AXI_READ_REQUESTS_ID_7 = 7,
+	AXI_READ_REQUESTS_ID_8 = 8,
+	AXI_READ_REQUESTS_ID_9 = 9,
+	AXI_READ_REQUESTS_ID_10 = 10,
+	AXI_READ_REQUESTS_ID_11 = 11,
+	AXI_READ_REQUESTS_ID_12 = 12,
+	AXI_READ_REQUESTS_ID_13 = 13,
+	AXI_READ_REQUESTS_ID_14 = 14,
+	AXI_READ_REQUESTS_ID_15 = 15,
+	AXI0_READ_REQUESTS_TOTAL = 16,
+	AXI1_READ_REQUESTS_TOTAL = 17,
+	AXI2_READ_REQUESTS_TOTAL = 18,
+	AXI3_READ_REQUESTS_TOTAL = 19,
+	AXI_READ_REQUESTS_TOTAL = 20,
+	AXI_WRITE_REQUESTS_ID_0 = 21,
+	AXI_WRITE_REQUESTS_ID_1 = 22,
+	AXI_WRITE_REQUESTS_ID_2 = 23,
+	AXI_WRITE_REQUESTS_ID_3 = 24,
+	AXI_WRITE_REQUESTS_ID_4 = 25,
+	AXI_WRITE_REQUESTS_ID_5 = 26,
+	AXI_WRITE_REQUESTS_ID_6 = 27,
+	AXI_WRITE_REQUESTS_ID_7 = 28,
+	AXI_WRITE_REQUESTS_ID_8 = 29,
+	AXI_WRITE_REQUESTS_ID_9 = 30,
+	AXI_WRITE_REQUESTS_ID_10 = 31,
+	AXI_WRITE_REQUESTS_ID_11 = 32,
+	AXI_WRITE_REQUESTS_ID_12 = 33,
+	AXI_WRITE_REQUESTS_ID_13 = 34,
+	AXI_WRITE_REQUESTS_ID_14 = 35,
+	AXI_WRITE_REQUESTS_ID_15 = 36,
+	AXI0_WRITE_REQUESTS_TOTAL = 37,
+	AXI1_WRITE_REQUESTS_TOTAL = 38,
+	AXI2_WRITE_REQUESTS_TOTAL = 39,
+	AXI3_WRITE_REQUESTS_TOTAL = 40,
+	AXI_WRITE_REQUESTS_TOTAL = 41,
+	AXI_TOTAL_REQUESTS = 42,
+	AXI_READ_DATA_BEATS_ID_0 = 43,
+	AXI_READ_DATA_BEATS_ID_1 = 44,
+	AXI_READ_DATA_BEATS_ID_2 = 45,
+	AXI_READ_DATA_BEATS_ID_3 = 46,
+	AXI_READ_DATA_BEATS_ID_4 = 47,
+	AXI_READ_DATA_BEATS_ID_5 = 48,
+	AXI_READ_DATA_BEATS_ID_6 = 49,
+	AXI_READ_DATA_BEATS_ID_7 = 50,
+	AXI_READ_DATA_BEATS_ID_8 = 51,
+	AXI_READ_DATA_BEATS_ID_9 = 52,
+	AXI_READ_DATA_BEATS_ID_10 = 53,
+	AXI_READ_DATA_BEATS_ID_11 = 54,
+	AXI_READ_DATA_BEATS_ID_12 = 55,
+	AXI_READ_DATA_BEATS_ID_13 = 56,
+	AXI_READ_DATA_BEATS_ID_14 = 57,
+	AXI_READ_DATA_BEATS_ID_15 = 58,
+	AXI0_READ_DATA_BEATS_TOTAL = 59,
+	AXI1_READ_DATA_BEATS_TOTAL = 60,
+	AXI2_READ_DATA_BEATS_TOTAL = 61,
+	AXI3_READ_DATA_BEATS_TOTAL = 62,
+	AXI_READ_DATA_BEATS_TOTAL = 63,
+	AXI_WRITE_DATA_BEATS_ID_0 = 64,
+	AXI_WRITE_DATA_BEATS_ID_1 = 65,
+	AXI_WRITE_DATA_BEATS_ID_2 = 66,
+	AXI_WRITE_DATA_BEATS_ID_3 = 67,
+	AXI_WRITE_DATA_BEATS_ID_4 = 68,
+	AXI_WRITE_DATA_BEATS_ID_5 = 69,
+	AXI_WRITE_DATA_BEATS_ID_6 = 70,
+	AXI_WRITE_DATA_BEATS_ID_7 = 71,
+	AXI_WRITE_DATA_BEATS_ID_8 = 72,
+	AXI_WRITE_DATA_BEATS_ID_9 = 73,
+	AXI_WRITE_DATA_BEATS_ID_10 = 74,
+	AXI_WRITE_DATA_BEATS_ID_11 = 75,
+	AXI_WRITE_DATA_BEATS_ID_12 = 76,
+	AXI_WRITE_DATA_BEATS_ID_13 = 77,
+	AXI_WRITE_DATA_BEATS_ID_14 = 78,
+	AXI_WRITE_DATA_BEATS_ID_15 = 79,
+	AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+	AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+	AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+	AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+	AXI_WRITE_DATA_BEATS_TOTAL = 84,
+	AXI_DATA_BEATS_TOTAL = 85,
+	CYCLES_HELD_OFF_ID_0 = 86,
+	CYCLES_HELD_OFF_ID_1 = 87,
+	CYCLES_HELD_OFF_ID_2 = 88,
+	CYCLES_HELD_OFF_ID_3 = 89,
+	CYCLES_HELD_OFF_ID_4 = 90,
+	CYCLES_HELD_OFF_ID_5 = 91,
+	CYCLES_HELD_OFF_ID_6 = 92,
+	CYCLES_HELD_OFF_ID_7 = 93,
+	CYCLES_HELD_OFF_ID_8 = 94,
+	CYCLES_HELD_OFF_ID_9 = 95,
+	CYCLES_HELD_OFF_ID_10 = 96,
+	CYCLES_HELD_OFF_ID_11 = 97,
+	CYCLES_HELD_OFF_ID_12 = 98,
+	CYCLES_HELD_OFF_ID_13 = 99,
+	CYCLES_HELD_OFF_ID_14 = 100,
+	CYCLES_HELD_OFF_ID_15 = 101,
+	AXI_READ_REQUEST_HELD_OFF = 102,
+	AXI_WRITE_REQUEST_HELD_OFF = 103,
+	AXI_REQUEST_HELD_OFF = 104,
+	AXI_WRITE_DATA_HELD_OFF = 105,
+	OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
+	OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
+	OCMEM_AXI_REQUEST_HELD_OFF = 108,
+	OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
+	ELAPSED_CYCLES_DDR = 110,
+	ELAPSED_CYCLES_OCMEM = 111,
+};
+
+enum a4xx_vfd_perfcounter_select {
+	VFD_UCHE_BYTE_FETCHED = 0,
+	VFD_UCHE_TRANS = 1,
+	VFD_FETCH_INSTRUCTIONS = 3,
+	VFD_BUSY_CYCLES = 5,
+	VFD_STALL_CYCLES_UCHE = 6,
+	VFD_STALL_CYCLES_HLSQ = 7,
+	VFD_STALL_CYCLES_VPC_BYPASS = 8,
+	VFD_STALL_CYCLES_VPC_ALLOC = 9,
+	VFD_MODE_0_FIBERS = 13,
+	VFD_MODE_1_FIBERS = 14,
+	VFD_MODE_2_FIBERS = 15,
+	VFD_MODE_3_FIBERS = 16,
+	VFD_MODE_4_FIBERS = 17,
+	VFD_BFIFO_STALL = 18,
+	VFD_NUM_VERTICES_TOTAL = 19,
+	VFD_PACKER_FULL = 20,
+	VFD_UCHE_REQUEST_FIFO_FULL = 21,
+	VFD_STARVE_CYCLES_PC = 22,
+	VFD_STARVE_CYCLES_UCHE = 23,
+};
+
+enum a4xx_vpc_perfcounter_select {
+	VPC_SP_LM_COMPONENTS = 2,
+	VPC_SP0_LM_BYTES = 3,
+	VPC_SP1_LM_BYTES = 4,
+	VPC_SP2_LM_BYTES = 5,
+	VPC_SP3_LM_BYTES = 6,
+	VPC_WORKING_CYCLES = 7,
+	VPC_STALL_CYCLES_LM = 8,
+	VPC_STARVE_CYCLES_RAS = 9,
+	VPC_STREAMOUT_CYCLES = 10,
+	VPC_UCHE_TRANSACTIONS = 12,
+	VPC_STALL_CYCLES_UCHE = 13,
+	VPC_BUSY_CYCLES = 14,
+	VPC_STARVE_CYCLES_SP = 15,
+};
+
+enum a4xx_vsc_perfcounter_select {
+	VSC_BUSY_CYCLES = 0,
+	VSC_WORKING_CYCLES = 1,
+	VSC_STALL_CYCLES_UCHE = 2,
+	VSC_STARVE_CYCLES_RAS = 3,
+	VSC_EOT_NUM = 4,
+};
+
 enum a4xx_tex_filter {
 	A4XX_TEX_NEAREST = 0,
 	A4XX_TEX_LINEAR = 1,
@@ -326,6 +897,12 @@
 
 #define REG_A4XX_RB_PERFCTR_RB_SEL_7				0x00000cce
 
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_0				0x00000ccf
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_1				0x00000cd0
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_2				0x00000cd1
+
 #define REG_A4XX_RB_PERFCTR_CCU_SEL_3				0x00000cd2
 
 #define REG_A4XX_RB_FRAME_BUFFER_DIMENSION			0x00000ce0
@@ -400,8 +977,13 @@
 #define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE			0x00000008
 #define A4XX_RB_MRT_CONTROL_BLEND				0x00000010
 #define A4XX_RB_MRT_CONTROL_BLEND2				0x00000020
-#define A4XX_RB_MRT_CONTROL_FASTCLEAR				0x00000400
-#define A4XX_RB_MRT_CONTROL_B11					0x00000800
+#define A4XX_RB_MRT_CONTROL_ROP_ENABLE				0x00000040
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK			0x00000f00
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT			8
+static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+	return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
 #define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK		0x0f000000
 #define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT		24
 static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -490,8 +1072,8 @@
 	return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
 }
 
-#define REG_A4XX_RB_BLEND_RED					0x000020f3
-#define A4XX_RB_BLEND_RED_UINT__MASK				0x00007fff
+#define REG_A4XX_RB_BLEND_RED					0x000020f0
+#define A4XX_RB_BLEND_RED_UINT__MASK				0x0000ffff
 #define A4XX_RB_BLEND_RED_UINT__SHIFT				0
 static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
 {
@@ -504,8 +1086,16 @@
 	return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
 }
 
-#define REG_A4XX_RB_BLEND_GREEN					0x000020f4
-#define A4XX_RB_BLEND_GREEN_UINT__MASK				0x00007fff
+#define REG_A4XX_RB_BLEND_RED_F32				0x000020f1
+#define A4XX_RB_BLEND_RED_F32__MASK				0xffffffff
+#define A4XX_RB_BLEND_RED_F32__SHIFT				0
+static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
+{
+	return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN					0x000020f2
+#define A4XX_RB_BLEND_GREEN_UINT__MASK				0x0000ffff
 #define A4XX_RB_BLEND_GREEN_UINT__SHIFT				0
 static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
 {
@@ -518,8 +1108,16 @@
 	return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
 }
 
-#define REG_A4XX_RB_BLEND_BLUE					0x000020f5
-#define A4XX_RB_BLEND_BLUE_UINT__MASK				0x00007fff
+#define REG_A4XX_RB_BLEND_GREEN_F32				0x000020f3
+#define A4XX_RB_BLEND_GREEN_F32__MASK				0xffffffff
+#define A4XX_RB_BLEND_GREEN_F32__SHIFT				0
+static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
+{
+	return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE					0x000020f4
+#define A4XX_RB_BLEND_BLUE_UINT__MASK				0x0000ffff
 #define A4XX_RB_BLEND_BLUE_UINT__SHIFT				0
 static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
 {
@@ -532,8 +1130,16 @@
 	return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
 }
 
+#define REG_A4XX_RB_BLEND_BLUE_F32				0x000020f5
+#define A4XX_RB_BLEND_BLUE_F32__MASK				0xffffffff
+#define A4XX_RB_BLEND_BLUE_F32__SHIFT				0
+static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
+{
+	return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
+}
+
 #define REG_A4XX_RB_BLEND_ALPHA					0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK				0x00007fff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK				0x0000ffff
 #define A4XX_RB_BLEND_ALPHA_UINT__SHIFT				0
 static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
 {
@@ -546,6 +1152,14 @@
 	return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
 }
 
+#define REG_A4XX_RB_BLEND_ALPHA_F32				0x000020f7
+#define A4XX_RB_BLEND_ALPHA_F32__MASK				0xffffffff
+#define A4XX_RB_BLEND_ALPHA_F32__SHIFT				0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
+{
+	return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
 #define REG_A4XX_RB_ALPHA_CONTROL				0x000020f8
 #define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK			0x000000ff
 #define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT			0
@@ -568,7 +1182,7 @@
 {
 	return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
 }
-#define A4XX_RB_FS_OUTPUT_FAST_CLEAR				0x00000100
+#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND			0x00000100
 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK			0xffff0000
 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT			16
 static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
@@ -736,6 +1350,7 @@
 }
 #define A4XX_RB_DEPTH_CONTROL_BF_ENABLE				0x00000080
 #define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE			0x00010000
+#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS			0x00020000
 #define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE			0x80000000
 
 #define REG_A4XX_RB_DEPTH_CLEAR					0x00002102
@@ -996,8 +1611,382 @@
 
 #define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D				0x0000004d
 
+#define REG_A4XX_RBBM_POWER_CNTL_IP				0x00000098
+#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE			0x00000001
+#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON			0x00100000
+
 #define REG_A4XX_RBBM_PERFCTR_CP_0_LO				0x0000009c
 
+#define REG_A4XX_RBBM_PERFCTR_CP_0_HI				0x0000009d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_LO				0x0000009e
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_HI				0x0000009f
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_LO				0x000000a0
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_HI				0x000000a1
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_LO				0x000000a2
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_HI				0x000000a3
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_LO				0x000000a4
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_HI				0x000000a5
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_LO				0x000000a6
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_HI				0x000000a7
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_LO				0x000000a8
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_HI				0x000000a9
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_LO				0x000000aa
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_HI				0x000000ab
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO				0x000000ac
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI				0x000000ad
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO				0x000000ae
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI				0x000000af
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO				0x000000b0
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI				0x000000b1
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO				0x000000b2
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI				0x000000b3
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_LO				0x000000b4
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_HI				0x000000b5
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_LO				0x000000b6
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_HI				0x000000b7
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_LO				0x000000b8
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_HI				0x000000b9
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_LO				0x000000ba
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_HI				0x000000bb
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_LO				0x000000bc
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_HI				0x000000bd
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_LO				0x000000be
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_HI				0x000000bf
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_LO				0x000000c0
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_HI				0x000000c1
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_LO				0x000000c2
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_HI				0x000000c3
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO				0x000000c4
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI				0x000000c5
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO				0x000000c6
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI				0x000000c7
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO				0x000000c8
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI				0x000000c9
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO				0x000000ca
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI				0x000000cb
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO				0x000000cc
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI				0x000000cd
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO				0x000000ce
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI				0x000000cf
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO				0x000000d0
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI				0x000000d1
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO				0x000000d2
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI				0x000000d3
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO				0x000000d4
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI				0x000000d5
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO				0x000000d6
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI				0x000000d7
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO				0x000000d8
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI				0x000000d9
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO				0x000000da
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI				0x000000db
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO				0x000000dc
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI				0x000000dd
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO				0x000000de
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI				0x000000df
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO				0x000000e0
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI				0x000000e1
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO				0x000000e2
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI				0x000000e3
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO				0x000000e4
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI				0x000000e5
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO				0x000000e6
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI				0x000000e7
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO				0x000000e8
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI				0x000000e9
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO				0x000000ea
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI				0x000000eb
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO				0x000000ec
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI				0x000000ed
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO				0x000000ee
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI				0x000000ef
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO				0x000000f0
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI				0x000000f1
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO				0x000000f2
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI				0x000000f3
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO				0x000000f4
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI				0x000000f5
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO				0x000000f6
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI				0x000000f7
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO				0x000000f8
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI				0x000000f9
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO				0x000000fa
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI				0x000000fb
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO				0x000000fc
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI				0x000000fd
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO				0x000000fe
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI				0x000000ff
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO				0x00000100
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI				0x00000101
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO				0x00000102
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI				0x00000103
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO				0x00000104
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI				0x00000105
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO				0x00000106
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI				0x00000107
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO				0x00000108
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI				0x00000109
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO				0x0000010a
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI				0x0000010b
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO				0x0000010c
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI				0x0000010d
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO				0x0000010e
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI				0x0000010f
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO				0x00000110
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI				0x00000111
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO				0x00000112
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI				0x00000113
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO				0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI				0x00000115
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_LO				0x00000116
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_HI				0x00000117
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_LO				0x00000118
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_HI				0x00000119
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_LO				0x0000011a
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_HI				0x0000011b
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_LO				0x0000011c
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_HI				0x0000011d
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_LO				0x0000011e
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_HI				0x0000011f
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_LO				0x00000120
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_HI				0x00000121
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_LO				0x00000122
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_HI				0x00000123
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_LO				0x00000124
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_HI				0x00000125
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_LO				0x00000126
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_HI				0x00000127
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_LO				0x00000128
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_HI				0x00000129
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_LO				0x0000012a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_HI				0x0000012b
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_LO				0x0000012c
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_HI				0x0000012d
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_LO				0x0000012e
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_HI				0x0000012f
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_LO				0x00000130
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_HI				0x00000131
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_LO				0x00000132
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_HI				0x00000133
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_LO				0x00000134
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_HI				0x00000135
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_LO				0x00000136
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_HI				0x00000137
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_LO				0x00000138
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_HI				0x00000139
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_LO				0x0000013a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_HI				0x0000013b
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_LO				0x0000013c
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_HI				0x0000013d
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_LO				0x0000013e
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_HI				0x0000013f
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_LO				0x00000140
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_HI				0x00000141
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_LO				0x00000142
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_HI				0x00000143
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_LO				0x00000144
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_HI				0x00000145
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_LO				0x00000146
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_HI				0x00000147
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_LO				0x00000148
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_HI				0x00000149
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_LO				0x0000014a
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_HI				0x0000014b
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO				0x0000014c
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI				0x0000014d
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO				0x0000014e
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI				0x0000014f
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO				0x00000166
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI				0x00000167
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO				0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI				0x00000169
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO			0x0000016e
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI			0x0000016f
+
 static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
 
 static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
@@ -1046,6 +2039,10 @@
 
 static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
 
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0			0x00000099
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1			0x0000009a
+
 #define REG_A4XX_RBBM_PERFCTR_PWR_1_LO				0x00000168
 
 #define REG_A4XX_RBBM_PERFCTR_CTL				0x00000170
@@ -1060,6 +2057,14 @@
 
 #define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI			0x00000175
 
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0			0x00000176
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1			0x00000177
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2			0x00000178
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3			0x00000179
+
 #define REG_A4XX_RBBM_GPU_BUSY_MASKED				0x0000017a
 
 #define REG_A4XX_RBBM_INT_0_STATUS				0x0000017d
@@ -1099,6 +2104,11 @@
 
 #define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5			0x0000019f
 
+#define REG_A4XX_RBBM_POWER_STATUS				0x000001b0
+#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON			0x00100000
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2			0x000001b8
+
 #define REG_A4XX_CP_SCRATCH_UMASK				0x00000228
 
 #define REG_A4XX_CP_SCRATCH_ADDR				0x00000229
@@ -1191,6 +2201,20 @@
 
 #define REG_A4XX_CP_PERFCTR_CP_SEL_0				0x00000500
 
+#define REG_A4XX_CP_PERFCTR_CP_SEL_1				0x00000501
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_2				0x00000502
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_3				0x00000503
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_4				0x00000504
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_5				0x00000505
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_6				0x00000506
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_7				0x00000507
+
 #define REG_A4XX_CP_PERFCOMBINER_SELECT				0x0000050b
 
 static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
@@ -1201,6 +2225,28 @@
 
 #define REG_A4XX_SP_MODE_CONTROL				0x00000ec3
 
+#define REG_A4XX_SP_PERFCTR_SP_SEL_0				0x00000ec4
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_1				0x00000ec5
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_2				0x00000ec6
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_3				0x00000ec7
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_4				0x00000ec8
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_5				0x00000ec9
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_6				0x00000eca
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_7				0x00000ecb
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_8				0x00000ecc
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_9				0x00000ecd
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_10				0x00000ece
+
 #define REG_A4XX_SP_PERFCTR_SP_SEL_11				0x00000ecf
 
 #define REG_A4XX_SP_SP_CTRL_REG					0x000022c0
@@ -1699,6 +2745,12 @@
 
 #define REG_A4XX_VPC_DEBUG_ECO_CONTROL				0x00000e64
 
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0				0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1				0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2				0x00000e67
+
 #define REG_A4XX_VPC_PERFCTR_VPC_SEL_3				0x00000e68
 
 #define REG_A4XX_VPC_ATTR					0x00002140
@@ -1811,6 +2863,20 @@
 
 #define REG_A4XX_VFD_DEBUG_CONTROL				0x00000e40
 
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0				0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1				0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2				0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3				0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4				0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5				0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6				0x00000e49
+
 #define REG_A4XX_VFD_PERFCTR_VFD_SEL_7				0x00000e4a
 
 #define REG_A4XX_VGT_CL_INITIATOR				0x000021d0
@@ -1967,6 +3033,20 @@
 
 #define REG_A4XX_TPL1_TP_MODE_CONTROL				0x00000f03
 
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0				0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1				0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2				0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3				0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4				0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5				0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6				0x00000f0a
+
 #define REG_A4XX_TPL1_PERFCTR_TP_SEL_7				0x00000f0b
 
 #define REG_A4XX_TPL1_TP_TEX_OFFSET				0x00002380
@@ -2021,9 +3101,23 @@
 
 #define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0				0x00000c88
 
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1				0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2				0x00000c8a
+
 #define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3				0x00000c8b
 
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0				0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1				0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2				0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3				0x00000c8f
+
 #define REG_A4XX_GRAS_CL_CLIP_CNTL				0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE			0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z			0x00400000
 
 #define REG_A4XX_GRAS_CLEAR_CNTL				0x00002003
 #define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR			0x00000001
@@ -2114,6 +3208,7 @@
 
 #define REG_A4XX_GRAS_ALPHA_CONTROL				0x00002073
 #define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE		0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS		0x00000008
 
 #define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE			0x00002074
 #define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK			0xffffffff
@@ -2285,6 +3380,20 @@
 
 #define REG_A4XX_UCHE_CACHE_WAYS_VFD				0x00000e8c
 
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0			0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1			0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2			0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3			0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4			0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5			0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6			0x00000e94
+
 #define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7			0x00000e95
 
 #define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD				0x00000e00
@@ -2295,6 +3404,22 @@
 
 #define REG_A4XX_HLSQ_PERF_PIPE_MASK				0x00000e0e
 
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0			0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1			0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2			0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3			0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4			0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5			0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6			0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7			0x00000e0d
+
 #define REG_A4XX_HLSQ_CONTROL_0_REG				0x000023c0
 #define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK		0x00000010
 #define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT		4
@@ -2549,6 +3674,18 @@
 
 #define REG_A4XX_PC_PERFCTR_PC_SEL_0				0x00000d10
 
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1				0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2				0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3				0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4				0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5				0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6				0x00000d16
+
 #define REG_A4XX_PC_PERFCTR_PC_SEL_7				0x00000d17
 
 #define REG_A4XX_PC_BIN_BASE					0x000021c0
@@ -2564,7 +3701,20 @@
 #define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST		0x02000000
 #define A4XX_PC_PRIM_VTX_CNTL_PSIZE				0x04000000
 
-#define REG_A4XX_UNKNOWN_21C5					0x000021c5
+#define REG_A4XX_PC_PRIM_VTX_CNTL2				0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK	0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT	0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+	return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK	0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT	3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+	return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE			0x00000040
 
 #define REG_A4XX_PC_RESTART_INDEX				0x000021c6
 
@@ -2646,20 +3796,6 @@
 
 #define REG_A4XX_UNKNOWN_20EF					0x000020ef
 
-#define REG_A4XX_UNKNOWN_20F0					0x000020f0
-
-#define REG_A4XX_UNKNOWN_20F1					0x000020f1
-
-#define REG_A4XX_UNKNOWN_20F2					0x000020f2
-
-#define REG_A4XX_UNKNOWN_20F7					0x000020f7
-#define A4XX_UNKNOWN_20F7__MASK					0xffffffff
-#define A4XX_UNKNOWN_20F7__SHIFT				0
-static inline uint32_t A4XX_UNKNOWN_20F7(float val)
-{
-	return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK;
-}
-
 #define REG_A4XX_UNKNOWN_2152					0x00002152
 
 #define REG_A4XX_UNKNOWN_2153					0x00002153
@@ -2720,6 +3856,12 @@
 {
 	return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
 }
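+/* LOD bias is stored as a signed fixed-point value with 8 fractional bits: */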
+#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK				0xfff80000
+#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT				19
+static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+	return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
 
 #define REG_A4XX_TEX_SAMP_1					0x00000001
 #define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK			0x0000000e
@@ -2728,6 +3870,7 @@
 {
 	return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
 }
+#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF			0x00000010
 #define A4XX_TEX_SAMP_1_UNNORM_COORDS				0x00000020
 #define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR			0x00000040
 #define A4XX_TEX_SAMP_1_MAX_LOD__MASK				0x000fff00
@@ -2796,7 +3939,7 @@
 {
 	return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
 }
-#define A4XX_TEX_CONST_1_WIDTH__MASK				0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__MASK				0x3fff8000
 #define A4XX_TEX_CONST_1_WIDTH__SHIFT				15
 static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
 {
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a53f1be..d0d3c7b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -102,11 +102,17 @@
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
-	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
-	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+	/* Early A430s have a timing issue with SP/TP power collapse;
+	 * disabling HW clock gating prevents it.
+	 */
+	if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
+	else
+		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
 }
 
 static void a4xx_me_init(struct msm_gpu *gpu)
 {
 	struct msm_ringbuffer *ring = gpu->rb;
@@ -141,7 +147,7 @@
 	uint32_t *ptr, len;
 	int i, ret;
 
-	if (adreno_is_a4xx(adreno_gpu)) {
+	if (adreno_is_a420(adreno_gpu)) {
 		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
 		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
 		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -150,6 +156,13 @@
 		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
 		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
 		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+	} else if (adreno_is_a430(adreno_gpu)) {
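+		/* Same VBIF tuning as the a420, minus the ABIT sort setup: */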
+		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
 	} else {
 		BUG();
 	}
@@ -161,6 +174,10 @@
 	gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
 	gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
 
+	if (adreno_is_a430(adreno_gpu)) {
+		gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
+	}
+
 	 /* Enable the RBBM error reporting bits */
 	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
 
@@ -183,6 +200,14 @@
 	/* Turn on performance counters: */
 	gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
 
+	/* Use the first CP counter for timestamp queries; userspace may set
+	 * this as well, but it selects the same counter/countable:
+	 */
+	gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
+
+	if (adreno_is_a430(adreno_gpu))
+		gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
+
 	/* Disable L2 bypass to avoid UCHE out of bounds errors */
 	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
 	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
@@ -190,6 +215,15 @@
 	gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
 			(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
 
+	/* On A430 enable SP regfile sleep for power savings */
+	/* TODO downstream does this for !a420, so maybe it applies to the a405 too? */
+	if (!adreno_is_a420(adreno_gpu)) {
+		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
+			0x00000441);
+		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
+			0x00000441);
+	}
+
 	a4xx_enable_hwcg(gpu);
 
 	/*
@@ -204,10 +238,6 @@
 		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
 	}
 
-	ret = adreno_hw_init(gpu);
-	if (ret)
-		return ret;
-
 	/* setup access protection: */
 	gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
 
@@ -263,6 +293,7 @@
 	gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
 
 	a4xx_me_init(gpu);
+
 	return 0;
 }
 
@@ -317,6 +348,13 @@
 	status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
 	DBG("%s: Int status %08x", gpu->name, status);
 
+	if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) {
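+		/* Bit 24 flags a write fault; the low bits hold the faulting
+		 * register offset:
+		 */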
+		uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS);
+		printk("CP | Protected mode error| %s | addr=%x\n",
+			reg & (1 << 24) ? "WRITE" : "READ",
+			(reg & 0xFFFFF) >> 2);
+	}
+
 	gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
 
 	msm_gpu_retire(gpu);
@@ -512,12 +550,63 @@
 	adreno_dump(gpu);
 }
 
+static int a4xx_pm_resume(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
+
+	ret = msm_gpu_pm_resume(gpu);
+	if (ret)
+		return ret;
+
+	if (adreno_is_a430(adreno_gpu)) {
+		unsigned int reg;
+		/* Set the default register values; set SW_COLLAPSE to 0 */
+		gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
+		do {
+			udelay(5);
+			reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
+		} while (!(reg & A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON));
+	}
+	return 0;
+}
+
+static int a4xx_pm_suspend(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
+
+	ret = msm_gpu_pm_suspend(gpu);
+	if (ret)
+		return ret;
+
+	if (adreno_is_a430(adreno_gpu)) {
+		/* Set the default register values; set SW_COLLAPSE to 1 */
+		gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
+	}
+	return 0;
+}
+
+static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+	uint32_t hi, lo, tmp;
+
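+	/* Read HI, then LO, then HI again, and retry until HI is stable so
+	 * a LO wraparound between the reads can't produce a torn value:
+	 */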
+	tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+	do {
+		hi = tmp;
+		lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
+		tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+	} while (tmp != hi);
+
+	*value = (((uint64_t)hi) << 32) | lo;
+
+	return 0;
+}
+
 static const struct adreno_gpu_funcs funcs = {
 	.base = {
 		.get_param = adreno_get_param,
 		.hw_init = a4xx_hw_init,
-		.pm_suspend = msm_gpu_pm_suspend,
-		.pm_resume = msm_gpu_pm_resume,
+		.pm_suspend = a4xx_pm_suspend,
+		.pm_resume = a4xx_pm_resume,
 		.recover = a4xx_recover,
 		.last_fence = adreno_last_fence,
 		.submit = adreno_submit,
@@ -529,6 +618,7 @@
 		.show = a4xx_show,
 #endif
 	},
+	.get_timestamp = a4xx_get_timestamp,
 };
 
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index c304468..e81481d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -9,16 +9,17 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -119,6 +120,23 @@
 	RB_COPY_DEPTH_STENCIL = 5,
 };
 
+enum a3xx_rop_code {
+	ROP_CLEAR = 0,
+	ROP_NOR = 1,
+	ROP_AND_INVERTED = 2,
+	ROP_COPY_INVERTED = 3,
+	ROP_AND_REVERSE = 4,
+	ROP_INVERT = 5,
+	ROP_NAND = 7,
+	ROP_AND = 8,
+	ROP_EQUIV = 9,
+	ROP_NOOP = 10,
+	ROP_OR_INVERTED = 11,
+	ROP_OR_REVERSE = 13,
+	ROP_OR = 14,
+	ROP_SET = 15,
+};
+
 enum a3xx_render_mode {
 	RB_RENDERING_PASS = 0,
 	RB_TILING_PASS = 1,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 950d27d..5127b75 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -69,6 +69,14 @@
 		.pfpfw = "a420_pfp.fw",
 		.gmem  = (SZ_1M + SZ_512K),
 		.init  = a4xx_gpu_init,
+	}, {
+		.rev   = ADRENO_REV(4, 3, 0, ANY_ID),
+		.revn  = 430,
+		.name  = "A430",
+		.pm4fw = "a420_pm4.fw",
+		.pfpfw = "a420_pfp.fw",
+		.gmem  = (SZ_1M + SZ_512K),
+		.init  = a4xx_gpu_init,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a3b54cc..4951172 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -41,6 +41,13 @@
 				(adreno_gpu->rev.major << 16) |
 				(adreno_gpu->rev.core << 24);
 		return 0;
+	case MSM_PARAM_MAX_FREQ:
+		*value = adreno_gpu->base.fast_rate;
+		return 0;
+	case MSM_PARAM_TIMESTAMP:
+		if (adreno_gpu->funcs->get_timestamp)
+			return adreno_gpu->funcs->get_timestamp(gpu, value);
+		return -EINVAL;
 	default:
 		DBG("%s: invalid param: %u", gpu->name, param);
 		return -EINVAL;
@@ -68,18 +75,15 @@
 	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
 			/* size is log2(quad-words): */
 			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
-			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
+			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
+			(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
 
 	/* Setup ringbuffer address: */
 	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-			rbmemptr(adreno_gpu, rptr));
 
-	/* Setup scratch/timestamp: */
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
-			rbmemptr(adreno_gpu, fence));
-
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
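+	/* a430's CP doesn't update the rptr shadow in memory, so don't
+	 * bother programming its address (see get_rptr()):
+	 */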
+	if (!adreno_is_a430(adreno_gpu))
+		adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+						rbmemptr(adreno_gpu, rptr));
 
 	return 0;
 }
@@ -89,6 +93,16 @@
 	return ring->cur - ring->start;
 }
 
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
+{
+	if (adreno_is_a430(adreno_gpu))
+		return adreno_gpu->memptrs->rptr = adreno_gpu_read(
+			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+	else
+		return adreno_gpu->memptrs->rptr;
+}
+
 uint32_t adreno_last_fence(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -137,7 +151,8 @@
 			if (priv->lastctx == ctx)
 				break;
 		case MSM_SUBMIT_CMD_BUF:
-			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
 			OUT_RING(ring, submit->cmd[i].iova);
 			OUT_RING(ring, submit->cmd[i].size);
 			ibs++;
@@ -216,9 +231,12 @@
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	uint32_t wptr = get_wptr(gpu->rb);
+	int ret;
 
 	/* wait for CP to drain ringbuffer: */
-	if (spin_until(adreno_gpu->memptrs->rptr == wptr))
+	ret = spin_until(get_rptr(adreno_gpu) == wptr);
+
+	if (ret)
 		DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
 
 	/* TODO maybe we need to reset GPU here to recover from hang? */
@@ -237,7 +255,7 @@
 
 	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
 			gpu->submitted_fence);
-	seq_printf(m, "rptr:     %d\n", adreno_gpu->memptrs->rptr);
+	seq_printf(m, "rptr:     %d\n", get_rptr(adreno_gpu));
 	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
 	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
 
@@ -278,7 +296,7 @@
 
 	printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
 			gpu->submitted_fence);
-	printk("rptr:     %d\n", adreno_gpu->memptrs->rptr);
+	printk("rptr:     %d\n", get_rptr(adreno_gpu));
 	printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
 	printk("rb wptr:  %d\n", get_wptr(gpu->rb));
 
@@ -313,7 +331,7 @@
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	uint32_t size = gpu->rb->size / 4;
 	uint32_t wptr = get_wptr(gpu->rb);
-	uint32_t rptr = adreno_gpu->memptrs->rptr;
+	uint32_t rptr = get_rptr(adreno_gpu);
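+	/* One word is kept unused so that wptr == rptr always means empty: */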
 	return (rptr + (size - 1) - wptr) % size;
 }
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 0a312e9..1d07511 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -114,6 +114,7 @@
 
 struct adreno_gpu_funcs {
 	struct msm_gpu_funcs base;
+	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
 };
 
 struct adreno_info {
@@ -228,6 +229,11 @@
 	return gpu->revn == 420;
 }
 
+static inline int adreno_is_a430(struct adreno_gpu *gpu)
+{
+       return gpu->revn == 430;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 int adreno_hw_init(struct msm_gpu *gpu);
 uint32_t adreno_last_fence(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index a22fef5..d7477ff 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -9,16 +9,17 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -172,6 +173,11 @@
 	CP_UNKNOWN_1A = 26,
 	CP_UNKNOWN_4E = 78,
 	CP_WIDE_REG_WRITE = 116,
+	CP_WAIT_MEM_WRITES = 18,
+	CP_MEM_TO_REG = 66,
+	CP_COND_REG_EXEC = 71,
+	CP_REG_TO_SCRATCH = 74,
+	CP_SCRATCH_TO_REG = 77,
 	IN_IB_PREFETCH_END = 23,
 	IN_SUBBLK_PREFETCH = 31,
 	IN_INSTR_PREFETCH = 32,
@@ -199,7 +205,11 @@
 
 enum adreno_state_src {
 	SS_DIRECT = 0,
+	SS_INVALID_ALL_IC = 2,
+	SS_INVALID_PART_IC = 3,
 	SS_INDIRECT = 4,
+	SS_INDIRECT_TCM = 5,
+	SS_INDIRECT_STM = 6,
 };
 
 enum a4xx_index_size {
@@ -227,7 +237,7 @@
 {
 	return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
 }
-#define CP_LOAD_STATE_0_NUM_UNIT__MASK				0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK				0xffc00000
 #define CP_LOAD_STATE_0_NUM_UNIT__SHIFT				22
 static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
 {
@@ -499,5 +509,29 @@
 	return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
 }
 
+#define REG_CP_REG_TO_MEM_0					0x00000000
+#define CP_REG_TO_MEM_0_REG__MASK				0x0000ffff
+#define CP_REG_TO_MEM_0_REG__SHIFT				0
+static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
+{
+	return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_0_CNT__MASK				0x3ff80000
+#define CP_REG_TO_MEM_0_CNT__SHIFT				19
+static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
+{
+	return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_0_64B					0x40000000
+#define CP_REG_TO_MEM_0_ACCUMULATE				0x80000000
+
+#define REG_CP_REG_TO_MEM_1					0x00000001
+#define CP_REG_TO_MEM_1_DEST__MASK				0xffffffff
+#define CP_REG_TO_MEM_1_DEST__SHIFT				0
+static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
+{
+	return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
+}
+
 
 #endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index b2b5f3d..4958594 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 2a827d8..e58e9b9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -57,10 +57,9 @@
 static const struct msm_dsi_config msm8916_dsi_cfg = {
 	.io_offset = DSI_6G_REG_SHIFT,
 	.reg_cfg = {
-		.num = 4,
+		.num = 3,
 		.regs = {
 			{"gdsc", -1, -1, -1, -1},
-			{"vdd", 2850000, 2850000, 100000, 100},
 			{"vdda", 1200000, 1200000, 100000, 100},
 			{"vddio", 1800000, 1800000, 100000, 100},
 		},
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 48f9967..4282ec6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -163,6 +163,10 @@
 	enum mipi_dsi_pixel_format format;
 	unsigned long mode_flags;
 
+	/* lane data parsed via DT */
+	int dlane_swap;
+	int num_data_lanes;
+
 	u32 dma_cmd_ctrl_restore;
 
 	bool registered;
@@ -845,19 +849,10 @@
 	data = DSI_CTRL_CLK_EN;
 
 	DBG("lane number=%d", msm_host->lanes);
-	if (msm_host->lanes == 2) {
-		data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
-		/* swap lanes for 2-lane panel for better performance */
-		dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
-			DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
-	} else {
-		/* Take 4 lanes as default */
-		data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
-			DSI_CTRL_LANE3;
-		/* Do not swap lanes for 4-lane panel */
-		dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
-			DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
-	}
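+	/* The per-lane enable bits are consecutive, so this expands to
+	 * DSI_CTRL_LANE0 | ... | LANE(n-1) for n lanes:
+	 */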
+	data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
+
+	dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+		  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
 
 	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
 		dsi_write(msm_host, REG_DSI_LANE_CTRL,
@@ -1479,13 +1474,14 @@
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 	int ret;
 
+	if (dsi->lanes > msm_host->num_data_lanes)
+		return -EINVAL;
+
 	msm_host->channel = dsi->channel;
 	msm_host->lanes = dsi->lanes;
 	msm_host->format = dsi->format;
 	msm_host->mode_flags = dsi->mode_flags;
 
-	WARN_ON(dsi->dev.of_node != msm_host->device_node);
-
 	/* Some gpios defined in panel DT need to be controlled by host */
 	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
 	if (ret)
@@ -1534,6 +1530,75 @@
 	.transfer = dsi_host_transfer,
 };
 
+/*
+ * List of supported physical to logical lane mappings.
+ * For example, the 2nd entry represents the following mapping:
+ *
+ * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
+ */
+static const int supported_data_lane_swaps[][4] = {
+	{ 0, 1, 2, 3 },
+	{ 3, 0, 1, 2 },
+	{ 2, 3, 0, 1 },
+	{ 1, 2, 3, 0 },
+	{ 0, 3, 2, 1 },
+	{ 1, 0, 3, 2 },
+	{ 2, 1, 0, 3 },
+	{ 3, 2, 1, 0 },
+};
+
+static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
+				    struct device_node *ep)
+{
+	struct device *dev = &msm_host->pdev->dev;
+	struct property *prop;
+	u32 lane_map[4];
+	int ret, i, len, num_lanes;
+
+	prop = of_find_property(ep, "qcom,data-lane-map", &len);
+	if (!prop) {
+		dev_dbg(dev, "failed to find data lane mapping\n");
+		return -EINVAL;
+	}
+
+	num_lanes = len / sizeof(u32);
+
+	if (num_lanes < 1 || num_lanes > 4) {
+		dev_err(dev, "bad number of data lanes\n");
+		return -EINVAL;
+	}
+
+	msm_host->num_data_lanes = num_lanes;
+
+	ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map,
+					 num_lanes);
+	if (ret) {
+		dev_err(dev, "failed to read lane data\n");
+		return ret;
+	}
+
+	/*
+	 * compare DT specified physical-logical lane mappings with the ones
+	 * supported by hardware
+	 */
+	for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
+		const int *swap = supported_data_lane_swaps[i];
+		int j;
+
+		for (j = 0; j < num_lanes; j++) {
+			if (swap[j] != lane_map[j])
+				break;
+		}
+
+		if (j == num_lanes) {
+			msm_host->dlane_swap = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
 static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 {
 	struct device *dev = &msm_host->pdev->dev;
@@ -1560,17 +1625,21 @@
 		return 0;
 	}
 
+	ret = dsi_host_parse_lane_data(msm_host, endpoint);
+	if (ret) {
+		dev_err(dev, "%s: invalid lane configuration %d\n",
+			__func__, ret);
+		goto err;
+	}
+
 	/* Get panel node from the output port's endpoint data */
 	device_node = of_graph_get_remote_port_parent(endpoint);
 	if (!device_node) {
 		dev_err(dev, "%s: no valid device\n", __func__);
-		of_node_put(endpoint);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err;
 	}
 
-	of_node_put(endpoint);
-	of_node_put(device_node);
-
 	msm_host->device_node = device_node;
 
 	if (of_property_read_bool(np, "syscon-sfpb")) {
@@ -1579,11 +1648,16 @@
 		if (IS_ERR(msm_host->sfpb)) {
 			dev_err(dev, "%s: failed to get sfpb regmap\n",
 				__func__);
-			return PTR_ERR(msm_host->sfpb);
+			ret = PTR_ERR(msm_host->sfpb);
 		}
 	}
 
-	return 0;
+	of_node_put(device_node);
+
+err:
+	of_node_put(endpoint);
+
+	return ret;
 }
 
 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 80ec65e4..2d99949 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 80b6038..2cf1664 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -97,8 +97,8 @@
 struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
 					       int id);
 #else
-struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
-					       int id)
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(
+	struct platform_device *pdev, int id)
 {
 	return ERR_PTR(-ENODEV);
 }
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 7d7662e..506434f 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index 90bf5ed..f1072c1 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 9a0989c..51b9ea5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -21,7 +21,7 @@
 
 #include "hdmi.h"
 
-void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
 {
 	uint32_t ctrl = 0;
 	unsigned long flags;
@@ -46,29 +46,27 @@
 			power_on ? "Enable" : "Disable", ctrl);
 }
 
-static irqreturn_t hdmi_irq(int irq, void *dev_id)
+static irqreturn_t msm_hdmi_irq(int irq, void *dev_id)
 {
 	struct hdmi *hdmi = dev_id;
 
 	/* Process HPD: */
-	hdmi_connector_irq(hdmi->connector);
+	msm_hdmi_connector_irq(hdmi->connector);
 
 	/* Process DDC: */
-	hdmi_i2c_irq(hdmi->i2c);
+	msm_hdmi_i2c_irq(hdmi->i2c);
 
 	/* Process HDCP: */
 	if (hdmi->hdcp_ctrl)
-		hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+		msm_hdmi_hdcp_irq(hdmi->hdcp_ctrl);
 
 	/* TODO audio.. */
 
 	return IRQ_HANDLED;
 }
 
-static void hdmi_destroy(struct hdmi *hdmi)
+static void msm_hdmi_destroy(struct hdmi *hdmi)
 {
-	struct hdmi_phy *phy = hdmi->phy;
-
 	/*
 	 * at this point, hpd has been disabled,
 	 * after flush workq, it's safe to deinit hdcp
@@ -77,21 +75,53 @@
 		flush_workqueue(hdmi->workq);
 		destroy_workqueue(hdmi->workq);
 	}
-	hdmi_hdcp_destroy(hdmi);
-	if (phy)
-		phy->funcs->destroy(phy);
+	msm_hdmi_hdcp_destroy(hdmi);
+
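+	/* Drop the PHY device reference taken in msm_hdmi_get_phy(): */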
+	if (hdmi->phy_dev) {
+		put_device(hdmi->phy_dev);
+		hdmi->phy = NULL;
+		hdmi->phy_dev = NULL;
+	}
 
 	if (hdmi->i2c)
-		hdmi_i2c_destroy(hdmi->i2c);
+		msm_hdmi_i2c_destroy(hdmi->i2c);
 
 	platform_set_drvdata(hdmi->pdev, NULL);
 }
 
+static int msm_hdmi_get_phy(struct hdmi *hdmi)
+{
+	struct platform_device *pdev = hdmi->pdev;
+	struct platform_device *phy_pdev;
+	struct device_node *phy_node;
+
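+	/* The PHY is a separate platform device referenced by the "phys"
+	 * phandle; if its driver hasn't bound yet, ask for probe deferral:
+	 */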
+	phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
+	if (!phy_node) {
+		dev_err(&pdev->dev, "cannot find phy device\n");
+		return -ENXIO;
+	}
+
+	phy_pdev = of_find_device_by_node(phy_node);
+	if (phy_pdev)
+		hdmi->phy = platform_get_drvdata(phy_pdev);
+
+	of_node_put(phy_node);
+
+	if (!phy_pdev || !hdmi->phy) {
+		dev_err(&pdev->dev, "phy driver is not ready\n");
+		return -EPROBE_DEFER;
+	}
+
+	hdmi->phy_dev = get_device(&phy_pdev->dev);
+
+	return 0;
+}
+
 /* construct hdmi at bind/probe time, grab all the resources.  If
  * we are to EPROBE_DEFER we want to do it here, rather than later
  * at modeset_init() time
  */
-static struct hdmi *hdmi_init(struct platform_device *pdev)
+static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
 {
 	struct hdmi_platform_config *config = pdev->dev.platform_data;
 	struct hdmi *hdmi = NULL;
@@ -108,18 +138,6 @@
 	hdmi->config = config;
 	spin_lock_init(&hdmi->reg_lock);
 
-	/* not sure about which phy maps to which msm.. probably I miss some */
-	if (config->phy_init) {
-		hdmi->phy = config->phy_init(hdmi);
-
-		if (IS_ERR(hdmi->phy)) {
-			ret = PTR_ERR(hdmi->phy);
-			dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
-			hdmi->phy = NULL;
-			goto fail;
-		}
-	}
-
 	hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
 	if (IS_ERR(hdmi->mmio)) {
 		ret = PTR_ERR(hdmi->mmio);
@@ -222,7 +240,7 @@
 
 	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
 
-	hdmi->i2c = hdmi_i2c_init(hdmi);
+	hdmi->i2c = msm_hdmi_i2c_init(hdmi);
 	if (IS_ERR(hdmi->i2c)) {
 		ret = PTR_ERR(hdmi->i2c);
 		dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
@@ -230,7 +248,13 @@
 		goto fail;
 	}
 
-	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+	ret = msm_hdmi_get_phy(hdmi);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to get phy\n");
+		goto fail;
+	}
+
+	hdmi->hdcp_ctrl = msm_hdmi_hdcp_init(hdmi);
 	if (IS_ERR(hdmi->hdcp_ctrl)) {
 		dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
 		hdmi->hdcp_ctrl = NULL;
@@ -240,7 +264,7 @@
 
 fail:
 	if (hdmi)
-		hdmi_destroy(hdmi);
+		msm_hdmi_destroy(hdmi);
 
 	return ERR_PTR(ret);
 }
@@ -250,10 +274,10 @@
  * driver (not hdmi sub-device's probe/bind!)
  *
  * Any resource (regulator/clk/etc) which could be missing at boot
- * should be handled in hdmi_init() so that failure happens from
+ * should be handled in msm_hdmi_init() so that failure happens from
  * hdmi sub-device's probe.
  */
-int hdmi_modeset_init(struct hdmi *hdmi,
+int msm_hdmi_modeset_init(struct hdmi *hdmi,
 		struct drm_device *dev, struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -265,7 +289,7 @@
 
 	hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
 
-	hdmi->bridge = hdmi_bridge_init(hdmi);
+	hdmi->bridge = msm_hdmi_bridge_init(hdmi);
 	if (IS_ERR(hdmi->bridge)) {
 		ret = PTR_ERR(hdmi->bridge);
 		dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
@@ -273,7 +297,7 @@
 		goto fail;
 	}
 
-	hdmi->connector = hdmi_connector_init(hdmi);
+	hdmi->connector = msm_hdmi_connector_init(hdmi);
 	if (IS_ERR(hdmi->connector)) {
 		ret = PTR_ERR(hdmi->connector);
 		dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
@@ -289,7 +313,7 @@
 	}
 
 	ret = devm_request_irq(&pdev->dev, hdmi->irq,
-			hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
 			"hdmi_isr", hdmi);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to request IRQ%u: %d\n",
@@ -309,7 +333,7 @@
 fail:
 	/* bridge is normally destroyed by drm: */
 	if (hdmi->bridge) {
-		hdmi_bridge_destroy(hdmi->bridge);
+		msm_hdmi_bridge_destroy(hdmi->bridge);
 		hdmi->bridge = NULL;
 	}
 	if (hdmi->connector) {
@@ -331,15 +355,12 @@
 static const char *pwr_reg_names_none[] = {};
 static const char *hpd_reg_names_none[] = {};
 
-static struct hdmi_platform_config hdmi_tx_8660_config = {
-		.phy_init = hdmi_phy_8x60_init,
-};
+static struct hdmi_platform_config hdmi_tx_8660_config;
 
 static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"};
 static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
 
 static struct hdmi_platform_config hdmi_tx_8960_config = {
-		.phy_init = hdmi_phy_8960_init,
 		HDMI_CFG(hpd_reg, 8960),
 		HDMI_CFG(hpd_clk, 8960),
 };
@@ -351,7 +372,6 @@
 static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
 
 static struct hdmi_platform_config hdmi_tx_8974_config = {
-		.phy_init = hdmi_phy_8x74_init,
 		HDMI_CFG(pwr_reg, 8x74),
 		HDMI_CFG(hpd_reg, 8x74),
 		HDMI_CFG(pwr_clk, 8x74),
@@ -362,7 +382,6 @@
 static const char *hpd_reg_names_8084[] = {"hpd-gdsc", "hpd-5v", "hpd-5v-en"};
 
 static struct hdmi_platform_config hdmi_tx_8084_config = {
-		.phy_init = hdmi_phy_8x74_init,
 		HDMI_CFG(pwr_reg, 8x74),
 		HDMI_CFG(hpd_reg, 8084),
 		HDMI_CFG(pwr_clk, 8x74),
@@ -371,7 +390,6 @@
 };
 
 static struct hdmi_platform_config hdmi_tx_8994_config = {
-		.phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */
 		HDMI_CFG(pwr_reg, 8x74),
 		HDMI_CFG(hpd_reg, none),
 		HDMI_CFG(pwr_clk, 8x74),
@@ -380,7 +398,6 @@
 };
 
 static struct hdmi_platform_config hdmi_tx_8996_config = {
-		.phy_init = NULL,
 		HDMI_CFG(pwr_reg, none),
 		HDMI_CFG(hpd_reg, none),
 		HDMI_CFG(pwr_clk, 8x74),
@@ -388,7 +405,21 @@
 		.hpd_freq      = hpd_clk_freq_8x74,
 };
 
-static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
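+/* HDMI gpios as named in DT, with their direction and active value: */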
+static const struct {
+	const char *name;
+	const bool output;
+	const int value;
+	const char *label;
+} msm_hdmi_gpio_pdata[] = {
+	{ "qcom,hdmi-tx-ddc-clk", true, 1, "HDMI_DDC_CLK" },
+	{ "qcom,hdmi-tx-ddc-data", true, 1, "HDMI_DDC_DATA" },
+	{ "qcom,hdmi-tx-hpd", false, 1, "HDMI_HPD" },
+	{ "qcom,hdmi-tx-mux-en", true, 1, "HDMI_MUX_EN" },
+	{ "qcom,hdmi-tx-mux-sel", true, 0, "HDMI_MUX_SEL" },
+	{ "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" },
+};
+
+static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
 {
 	int gpio = of_get_named_gpio(of_node, name, 0);
 	if (gpio < 0) {
@@ -403,13 +434,14 @@
 	return gpio;
 }
 
-static int hdmi_bind(struct device *dev, struct device *master, void *data)
+static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct msm_drm_private *priv = drm->dev_private;
 	static struct hdmi_platform_config *hdmi_cfg;
 	struct hdmi *hdmi;
 	struct device_node *of_node = dev->of_node;
+	int i;
 
 	hdmi_cfg = (struct hdmi_platform_config *)
 			of_device_get_match_data(dev);
@@ -420,16 +452,18 @@
 
 	hdmi_cfg->mmio_name     = "core_physical";
 	hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
-	hdmi_cfg->ddc_clk_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
-	hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
-	hdmi_cfg->hpd_gpio      = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
-	hdmi_cfg->mux_en_gpio   = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
-	hdmi_cfg->mux_sel_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
-	hdmi_cfg->mux_lpm_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
+
+	for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
+		hdmi_cfg->gpios[i].num = msm_hdmi_get_gpio(of_node,
+						msm_hdmi_gpio_pdata[i].name);
+		hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output;
+		hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value;
+		hdmi_cfg->gpios[i].label = msm_hdmi_gpio_pdata[i].label;
+	}
 
 	dev->platform_data = hdmi_cfg;
 
-	hdmi = hdmi_init(to_platform_device(dev));
+	hdmi = msm_hdmi_init(to_platform_device(dev));
 	if (IS_ERR(hdmi))
 		return PTR_ERR(hdmi);
 	priv->hdmi = hdmi;
@@ -437,34 +471,34 @@
 	return 0;
 }
 
-static void hdmi_unbind(struct device *dev, struct device *master,
+static void msm_hdmi_unbind(struct device *dev, struct device *master,
 		void *data)
 {
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct msm_drm_private *priv = drm->dev_private;
 	if (priv->hdmi) {
-		hdmi_destroy(priv->hdmi);
+		msm_hdmi_destroy(priv->hdmi);
 		priv->hdmi = NULL;
 	}
 }
 
-static const struct component_ops hdmi_ops = {
-		.bind   = hdmi_bind,
-		.unbind = hdmi_unbind,
+static const struct component_ops msm_hdmi_ops = {
+		.bind   = msm_hdmi_bind,
+		.unbind = msm_hdmi_unbind,
 };
 
-static int hdmi_dev_probe(struct platform_device *pdev)
+static int msm_hdmi_dev_probe(struct platform_device *pdev)
 {
-	return component_add(&pdev->dev, &hdmi_ops);
+	return component_add(&pdev->dev, &msm_hdmi_ops);
 }
 
-static int hdmi_dev_remove(struct platform_device *pdev)
+static int msm_hdmi_dev_remove(struct platform_device *pdev)
 {
-	component_del(&pdev->dev, &hdmi_ops);
+	component_del(&pdev->dev, &msm_hdmi_ops);
 	return 0;
 }
 
-static const struct of_device_id dt_match[] = {
+static const struct of_device_id msm_hdmi_dt_match[] = {
 	{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
 	{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
 	{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
@@ -474,21 +508,23 @@
 	{}
 };
 
-static struct platform_driver hdmi_driver = {
-	.probe = hdmi_dev_probe,
-	.remove = hdmi_dev_remove,
+static struct platform_driver msm_hdmi_driver = {
+	.probe = msm_hdmi_dev_probe,
+	.remove = msm_hdmi_dev_remove,
 	.driver = {
 		.name = "hdmi_msm",
-		.of_match_table = dt_match,
+		.of_match_table = msm_hdmi_dt_match,
 	},
 };
 
-void __init hdmi_register(void)
+void __init msm_hdmi_register(void)
 {
-	platform_driver_register(&hdmi_driver);
+	msm_hdmi_phy_driver_register();
+	platform_driver_register(&msm_hdmi_driver);
 }
 
-void __exit hdmi_unregister(void)
+void __exit msm_hdmi_unregister(void)
 {
-	platform_driver_unregister(&hdmi_driver);
+	platform_driver_unregister(&msm_hdmi_driver);
+	msm_hdmi_phy_driver_unregister();
 }
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d0e6631..b04a646 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -27,10 +27,18 @@
 #include "msm_drv.h"
 #include "hdmi.xml.h"
 
+#define HDMI_MAX_NUM_GPIO	6
 
 struct hdmi_phy;
 struct hdmi_platform_config;
 
+struct hdmi_gpio_data {
+	int num;
+	bool output;
+	int value;
+	const char *label;
+};
+
 struct hdmi_audio {
 	bool enabled;
 	struct hdmi_audio_infoframe infoframe;
@@ -62,6 +70,8 @@
 	struct clk **pwr_clks;
 
 	struct hdmi_phy *phy;
+	struct device *phy_dev;
+
 	struct i2c_adapter *i2c;
 	struct drm_connector *connector;
 	struct drm_bridge *bridge;
@@ -88,7 +98,6 @@
 
 /* platform config data (ie. from DT, or pdata) */
 struct hdmi_platform_config {
-	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
 	const char *mmio_name;
 	const char *qfprom_mmio_name;
 
@@ -110,11 +119,10 @@
 	int pwr_clk_cnt;
 
 	/* gpio's: */
-	int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
-	int mux_lpm_gpio;
+	struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO];
 };
 
-void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
 
 static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
 {
@@ -132,65 +140,113 @@
 }
 
 /*
- * The phy appears to be different, for example between 8960 and 8x60,
- * so split the phy related functions out and load the correct one at
- * runtime:
+ * hdmi phy:
  */
 
-struct hdmi_phy_funcs {
-	void (*destroy)(struct hdmi_phy *phy);
+enum hdmi_phy_type {
+	MSM_HDMI_PHY_8x60,
+	MSM_HDMI_PHY_8960,
+	MSM_HDMI_PHY_8x74,
+	MSM_HDMI_PHY_8996,
+	MSM_HDMI_PHY_MAX,
+};
+
+struct hdmi_phy_cfg {
+	enum hdmi_phy_type type;
 	void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
 	void (*powerdown)(struct hdmi_phy *phy);
+	const char * const *reg_names;
+	int num_regs;
+	const char * const *clk_names;
+	int num_clks;
 };
 
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg;
+
 struct hdmi_phy {
+	struct platform_device *pdev;
+	void __iomem *mmio;
+	struct hdmi_phy_cfg *cfg;
 	const struct hdmi_phy_funcs *funcs;
+	struct regulator **regs;
+	struct clk **clks;
 };
 
-struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
-struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
-struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
+static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 reg, u32 data)
+{
+	msm_writel(data, phy->mmio + reg);
+}
+
+static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 reg)
+{
+	return msm_readl(phy->mmio + reg);
+}
+
+int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy);
+void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy);
+void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock);
+void msm_hdmi_phy_powerdown(struct hdmi_phy *phy);
+void __init msm_hdmi_phy_driver_register(void);
+void __exit msm_hdmi_phy_driver_unregister(void);
+
+#ifdef CONFIG_COMMON_CLK
+int msm_hdmi_pll_8960_init(struct platform_device *pdev);
+int msm_hdmi_pll_8996_init(struct platform_device *pdev);
+#else
+static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+
+static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+#endif
 
 /*
  * audio:
  */
 
-int hdmi_audio_update(struct hdmi *hdmi);
-int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+int msm_hdmi_audio_update(struct hdmi *hdmi);
+int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
 	uint32_t num_of_channels, uint32_t channel_allocation,
 	uint32_t level_shift, bool down_mix);
-void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
+void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
 
 
 /*
  * hdmi bridge:
  */
 
-struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi);
-void hdmi_bridge_destroy(struct drm_bridge *bridge);
+struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi);
+void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
 
 /*
  * hdmi connector:
  */
 
-void hdmi_connector_irq(struct drm_connector *connector);
-struct drm_connector *hdmi_connector_init(struct hdmi *hdmi);
+void msm_hdmi_connector_irq(struct drm_connector *connector);
+struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
 
 /*
  * i2c adapter for ddc:
  */
 
-void hdmi_i2c_irq(struct i2c_adapter *i2c);
-void hdmi_i2c_destroy(struct i2c_adapter *i2c);
-struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
+void msm_hdmi_i2c_irq(struct i2c_adapter *i2c);
+void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c);
+struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi);
 
 /*
  * hdcp
  */
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
-void hdmi_hdcp_destroy(struct hdmi *hdmi);
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi);
+void msm_hdmi_hdcp_destroy(struct hdmi *hdmi);
+void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
 
 #endif /* __HDMI_CONNECTOR_H__ */
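Each supported SoC now describes its PHY with one hdmi_phy_cfg: the powerup/powerdown hooks plus the names of the regulators and clocks the shared resource code should acquire, presumably walked generically by msm_hdmi_phy_resource_enable()/_disable(). A minimal sketch of such a table, with regulator and clock names that are illustrative assumptions rather than the driver's actual strings:

	static const char * const example_phy_regs[] = { "core-vdda" };
	static const char * const example_phy_clks[] = { "iface" };

	const struct hdmi_phy_cfg msm_hdmi_phy_example_cfg = {
		.type      = MSM_HDMI_PHY_8996,
		.reg_names = example_phy_regs,
		.num_regs  = ARRAY_SIZE(example_phy_regs),
		.clk_names = example_phy_clks,
		.num_clks  = ARRAY_SIZE(example_phy_clks),
	};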
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 10c4570..34c7df6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
@@ -559,7 +560,7 @@
 
 #define REG_HDMI_CEC_WR_CHECK_CONFIG				0x00000370
 
-#define REG_HDMI_8x60_PHY_REG0					0x00000300
+#define REG_HDMI_8x60_PHY_REG0					0x00000000
 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK			0x0000001c
 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT		2
 static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
@@ -567,7 +568,7 @@
 	return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
 }
 
-#define REG_HDMI_8x60_PHY_REG1					0x00000304
+#define REG_HDMI_8x60_PHY_REG1					0x00000004
 #define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK			0x000000f0
 #define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT			4
 static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
@@ -581,7 +582,7 @@
 	return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
 }
 
-#define REG_HDMI_8x60_PHY_REG2					0x00000308
+#define REG_HDMI_8x60_PHY_REG2					0x00000008
 #define HDMI_8x60_PHY_REG2_PD_DESER				0x00000001
 #define HDMI_8x60_PHY_REG2_PD_DRIVE_1				0x00000002
 #define HDMI_8x60_PHY_REG2_PD_DRIVE_2				0x00000004
@@ -591,152 +592,152 @@
 #define HDMI_8x60_PHY_REG2_PD_PWRGEN				0x00000040
 #define HDMI_8x60_PHY_REG2_RCV_SENSE_EN				0x00000080
 
-#define REG_HDMI_8x60_PHY_REG3					0x0000030c
+#define REG_HDMI_8x60_PHY_REG3					0x0000000c
 #define HDMI_8x60_PHY_REG3_PLL_ENABLE				0x00000001
 
-#define REG_HDMI_8x60_PHY_REG4					0x00000310
+#define REG_HDMI_8x60_PHY_REG4					0x00000010
 
-#define REG_HDMI_8x60_PHY_REG5					0x00000314
+#define REG_HDMI_8x60_PHY_REG5					0x00000014
 
-#define REG_HDMI_8x60_PHY_REG6					0x00000318
+#define REG_HDMI_8x60_PHY_REG6					0x00000018
 
-#define REG_HDMI_8x60_PHY_REG7					0x0000031c
+#define REG_HDMI_8x60_PHY_REG7					0x0000001c
 
-#define REG_HDMI_8x60_PHY_REG8					0x00000320
+#define REG_HDMI_8x60_PHY_REG8					0x00000020
 
-#define REG_HDMI_8x60_PHY_REG9					0x00000324
+#define REG_HDMI_8x60_PHY_REG9					0x00000024
 
-#define REG_HDMI_8x60_PHY_REG10					0x00000328
+#define REG_HDMI_8x60_PHY_REG10					0x00000028
 
-#define REG_HDMI_8x60_PHY_REG11					0x0000032c
+#define REG_HDMI_8x60_PHY_REG11					0x0000002c
 
-#define REG_HDMI_8x60_PHY_REG12					0x00000330
+#define REG_HDMI_8x60_PHY_REG12					0x00000030
 #define HDMI_8x60_PHY_REG12_RETIMING_EN				0x00000001
 #define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN			0x00000002
 #define HDMI_8x60_PHY_REG12_FORCE_LOCK				0x00000010
 
-#define REG_HDMI_8960_PHY_REG0					0x00000400
+#define REG_HDMI_8960_PHY_REG0					0x00000000
 
-#define REG_HDMI_8960_PHY_REG1					0x00000404
+#define REG_HDMI_8960_PHY_REG1					0x00000004
 
-#define REG_HDMI_8960_PHY_REG2					0x00000408
+#define REG_HDMI_8960_PHY_REG2					0x00000008
 
-#define REG_HDMI_8960_PHY_REG3					0x0000040c
+#define REG_HDMI_8960_PHY_REG3					0x0000000c
 
-#define REG_HDMI_8960_PHY_REG4					0x00000410
+#define REG_HDMI_8960_PHY_REG4					0x00000010
 
-#define REG_HDMI_8960_PHY_REG5					0x00000414
+#define REG_HDMI_8960_PHY_REG5					0x00000014
 
-#define REG_HDMI_8960_PHY_REG6					0x00000418
+#define REG_HDMI_8960_PHY_REG6					0x00000018
 
-#define REG_HDMI_8960_PHY_REG7					0x0000041c
+#define REG_HDMI_8960_PHY_REG7					0x0000001c
 
-#define REG_HDMI_8960_PHY_REG8					0x00000420
+#define REG_HDMI_8960_PHY_REG8					0x00000020
 
-#define REG_HDMI_8960_PHY_REG9					0x00000424
+#define REG_HDMI_8960_PHY_REG9					0x00000024
 
-#define REG_HDMI_8960_PHY_REG10					0x00000428
+#define REG_HDMI_8960_PHY_REG10					0x00000028
 
-#define REG_HDMI_8960_PHY_REG11					0x0000042c
+#define REG_HDMI_8960_PHY_REG11					0x0000002c
 
-#define REG_HDMI_8960_PHY_REG12					0x00000430
+#define REG_HDMI_8960_PHY_REG12					0x00000030
 #define HDMI_8960_PHY_REG12_SW_RESET				0x00000020
 #define HDMI_8960_PHY_REG12_PWRDN_B				0x00000080
 
-#define REG_HDMI_8960_PHY_REG_BIST_CFG				0x00000434
+#define REG_HDMI_8960_PHY_REG_BIST_CFG				0x00000034
 
-#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL				0x00000438
+#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL				0x00000038
 
-#define REG_HDMI_8960_PHY_REG_MISC0				0x0000043c
+#define REG_HDMI_8960_PHY_REG_MISC0				0x0000003c
 
-#define REG_HDMI_8960_PHY_REG13					0x00000440
+#define REG_HDMI_8960_PHY_REG13					0x00000040
 
-#define REG_HDMI_8960_PHY_REG14					0x00000444
+#define REG_HDMI_8960_PHY_REG14					0x00000044
 
-#define REG_HDMI_8960_PHY_REG15					0x00000448
+#define REG_HDMI_8960_PHY_REG15					0x00000048
 
-#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG			0x00000500
+#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG			0x00000000
 
-#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG			0x00000504
+#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG			0x00000004
 
-#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0			0x00000508
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0			0x00000008
 
-#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1			0x0000050c
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1			0x0000000c
 
-#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG			0x00000510
+#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG			0x00000010
 
-#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG			0x00000514
+#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG			0x00000014
 
-#define REG_HDMI_8960_PHY_PLL_PWRDN_B				0x00000518
+#define REG_HDMI_8960_PHY_PLL_PWRDN_B				0x00000018
 #define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL			0x00000002
 #define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B			0x00000008
 
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG0				0x0000051c
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG0				0x0000001c
 
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG1				0x00000520
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG1				0x00000020
 
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG2				0x00000524
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG2				0x00000024
 
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG3				0x00000528
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG3				0x00000028
 
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG4				0x0000052c
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG4				0x0000002c
 
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG0				0x00000530
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG0				0x00000030
 
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG1				0x00000534
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG1				0x00000034
 
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG2				0x00000538
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG2				0x00000038
 
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG3				0x0000053c
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG3				0x0000003c
 
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0			0x00000540
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0			0x00000040
 
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1			0x00000544
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1			0x00000044
 
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2			0x00000548
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2			0x00000048
 
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0			0x0000054c
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0			0x0000004c
 
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1			0x00000550
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1			0x00000050
 
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2			0x00000554
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2			0x00000054
 
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3			0x00000558
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3			0x00000058
 
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4			0x0000055c
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4			0x0000005c
 
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5			0x00000560
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5			0x00000060
 
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6			0x00000564
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6			0x00000064
 
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7			0x00000568
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7			0x00000068
 
-#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL				0x0000056c
+#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL				0x0000006c
 
-#define REG_HDMI_8960_PHY_PLL_MISC0				0x00000570
+#define REG_HDMI_8960_PHY_PLL_MISC0				0x00000070
 
-#define REG_HDMI_8960_PHY_PLL_MISC1				0x00000574
+#define REG_HDMI_8960_PHY_PLL_MISC1				0x00000074
 
-#define REG_HDMI_8960_PHY_PLL_MISC2				0x00000578
+#define REG_HDMI_8960_PHY_PLL_MISC2				0x00000078
 
-#define REG_HDMI_8960_PHY_PLL_MISC3				0x0000057c
+#define REG_HDMI_8960_PHY_PLL_MISC3				0x0000007c
 
-#define REG_HDMI_8960_PHY_PLL_MISC4				0x00000580
+#define REG_HDMI_8960_PHY_PLL_MISC4				0x00000080
 
-#define REG_HDMI_8960_PHY_PLL_MISC5				0x00000584
+#define REG_HDMI_8960_PHY_PLL_MISC5				0x00000084
 
-#define REG_HDMI_8960_PHY_PLL_MISC6				0x00000588
+#define REG_HDMI_8960_PHY_PLL_MISC6				0x00000088
 
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0			0x0000058c
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0			0x0000008c
 
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1			0x00000590
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1			0x00000090
 
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2			0x00000594
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2			0x00000094
 
-#define REG_HDMI_8960_PHY_PLL_STATUS0				0x00000598
+#define REG_HDMI_8960_PHY_PLL_STATUS0				0x00000098
 #define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK			0x00000001
 
-#define REG_HDMI_8960_PHY_PLL_STATUS1				0x0000059c
+#define REG_HDMI_8960_PHY_PLL_STATUS1				0x0000009c
 
 #define REG_HDMI_8x74_ANA_CFG0					0x00000000
 
@@ -843,5 +844,501 @@
 
 #define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL			0x000000a0
 
+#define REG_HDMI_8996_PHY_CFG					0x00000000
+
+#define REG_HDMI_8996_PHY_PD_CTL				0x00000004
+
+#define REG_HDMI_8996_PHY_MODE					0x00000008
+
+#define REG_HDMI_8996_PHY_MISR_CLEAR				0x0000000c
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG0			0x00000010
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG1			0x00000014
+
+#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE0		0x00000018
+
+#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE1		0x0000001c
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN0			0x00000020
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN1			0x00000024
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG0			0x00000028
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG1			0x0000002c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE0		0x00000030
+
+#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE1		0x00000034
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN0			0x00000038
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN1			0x0000003c
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS_SEL				0x00000040
+
+#define REG_HDMI_8996_PHY_TXCAL_CFG0				0x00000044
+
+#define REG_HDMI_8996_PHY_TXCAL_CFG1				0x00000048
+
+#define REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL			0x0000004c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL			0x00000050
+
+#define REG_HDMI_8996_PHY_LANE_BIST_CONFIG			0x00000054
+
+#define REG_HDMI_8996_PHY_CLOCK					0x00000058
+
+#define REG_HDMI_8996_PHY_MISC1					0x0000005c
+
+#define REG_HDMI_8996_PHY_MISC2					0x00000060
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS0			0x00000064
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS1			0x00000068
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS2			0x0000006c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS0			0x00000070
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS1			0x00000074
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS2			0x00000078
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS0			0x0000007c
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS1			0x00000080
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS2			0x00000084
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS3			0x00000088
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS0			0x0000008c
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS1			0x00000090
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS2			0x00000094
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS3			0x00000098
+
+#define REG_HDMI_8996_PHY_STATUS				0x0000009c
+
+#define REG_HDMI_8996_PHY_MISC3_STATUS				0x000000a0
+
+#define REG_HDMI_8996_PHY_MISC4_STATUS				0x000000a4
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS0				0x000000a8
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS1				0x000000ac
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS2				0x000000b0
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS3				0x000000b4
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID0			0x000000b8
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID1			0x000000bc
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID2			0x000000c0
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID3			0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL1			0x00000000
+
+#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL2			0x00000004
+
+#define REG_HDMI_PHY_QSERDES_COM_FREQ_UPDATE			0x00000008
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_TIMER			0x0000000c
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER			0x00000010
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER1			0x00000014
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER2			0x00000018
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_PER1			0x0000001c
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_PER2			0x00000020
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1			0x00000024
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2			0x00000028
+
+#define REG_HDMI_PHY_QSERDES_COM_POST_DIV			0x0000002c
+
+#define REG_HDMI_PHY_QSERDES_COM_POST_DIV_MUX			0x00000030
+
+#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x00000034
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1			0x00000038
+
+#define REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL			0x0000003c
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE		0x00000040
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_EN				0x00000044
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_IVCO			0x00000048
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0		0x0000004c
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0		0x00000050
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0		0x00000054
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE1		0x00000058
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE1		0x0000005c
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE1		0x00000060
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE2		0x00000064
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD0			0x00000064
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE2		0x00000068
+
+#define REG_HDMI_PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL		0x00000068
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE2		0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS		0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_TRIM			0x00000070
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_EP_DIV			0x00000074
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0			0x00000078
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE1			0x0000007c
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE2			0x00000080
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD1			0x00000080
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0		0x00000084
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE1		0x00000088
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE2		0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD2			0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0		0x00000090
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE1		0x00000094
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE2		0x00000098
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD3			0x00000098
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CNTRL			0x0000009c
+
+#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_CTRL			0x000000a0
+
+#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_DC			0x000000a4
+
+#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_IN_SYNC_SEL		0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM		0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL			0x000000ac
+
+#define REG_HDMI_PHY_QSERDES_COM_CML_SYSCLK_SEL			0x000000b0
+
+#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL			0x000000b4
+
+#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL2			0x000000b8
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL			0x000000bc
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL2			0x000000c0
+
+#define REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM		0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN			0x000000c8
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_CFG			0x000000cc
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0		0x000000d0
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE1		0x000000d4
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE2		0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL		0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0		0x000000dc
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0		0x000000e0
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0		0x000000e4
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE1		0x000000e8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE1		0x000000ec
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE1		0x000000f0
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE2		0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL1		0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE2		0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL2		0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE2		0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD4			0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_INITVAL		0x00000100
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_EN			0x00000104
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0		0x00000108
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0		0x0000010c
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1		0x00000110
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1		0x00000114
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE2		0x00000118
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL1		0x00000118
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE2		0x0000011c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL2		0x0000011c
+
+#define REG_HDMI_PHY_QSERDES_COM_RES_TRIM_CONTROL2		0x00000120
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL			0x00000124
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP			0x00000128
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE0		0x0000012c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE0		0x00000130
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE1		0x00000134
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE1		0x00000138
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE2		0x0000013c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL1		0x0000013c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE2		0x00000140
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL2		0x00000140
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER1		0x00000144
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER2		0x00000148
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR				0x0000014c
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CLK			0x00000150
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_OUT_STATUS		0x00000154
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_READY_STATUS		0x00000158
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_STATUS			0x0000015c
+
+#define REG_HDMI_PHY_QSERDES_COM_RESET_SM_STATUS		0x00000160
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CODE_STATUS		0x00000164
+
+#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE1_STATUS		0x00000168
+
+#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE2_STATUS		0x0000016c
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_CTRL			0x00000170
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_SELECT			0x00000174
+
+#define REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL			0x00000178
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS	0x0000017c
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_ANALOG			0x00000180
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV			0x00000184
+
+#define REG_HDMI_PHY_QSERDES_COM_SW_RESET			0x00000188
+
+#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN			0x0000018c
+
+#define REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS			0x00000190
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG			0x00000194
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RATE_OVERRIDE		0x00000198
+
+#define REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL		0x0000019c
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS0			0x000001a0
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS1			0x000001a4
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS2			0x000001a8
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS3			0x000001ac
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS_SEL			0x000001b0
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC1			0x000001b4
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC2			0x000001b8
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE1		0x000001bc
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE2		0x000001c0
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD5			0x000001c4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_MODE_LANENO		0x00000000
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_INVERT			0x00000004
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE		0x00000008
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_ONE		0x0000000c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_TWO		0x00000010
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_THREE		0x00000014
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL		0x00000018
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POST2_EMPH		0x0000001c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BOOST_LVL_UP_DN		0x00000020
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES		0x00000024
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_IDLE_LVL_LARGE_AMP	0x00000028
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL			0x0000002c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET		0x00000030
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN		0x00000034
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRE_STALL_LDO_BOOST_EN	0x00000038
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND			0x0000003c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_SLEW_CNTL			0x00000040
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_INTERFACE_SELECT		0x00000044
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_LPB_EN			0x00000048
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_TX		0x0000004c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_RX		0x00000050
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET		0x00000054
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH1			0x00000058
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH2			0x0000005c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_SERDES_BYP_EN_OUT		0x00000060
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_DEBUG_BUS_SEL		0x00000064
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	0x00000068
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POL_INV			0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN	0x00000070
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN1		0x00000074
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN2		0x00000078
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN3		0x0000007c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN4		0x00000080
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN5		0x00000084
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN6		0x00000088
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN7		0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN8		0x00000090
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE			0x00000094
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE		0x00000098
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE_CONFIGURATION	0x0000009c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL1			0x000000a0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL2			0x000000a4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL		0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL_2		0x000000ac
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED1			0x000000b0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED2			0x000000b4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED3			0x000000b8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED4			0x000000bc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN			0x000000c0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN_MUXES		0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN		0x000000c8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_INTERFACE_MODE		0x000000cc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_CTRL			0x000000d0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_ENCODED_OR_DATA		0x000000d4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND2	0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND2	0x000000dc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND2	0x000000e0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND2	0x000000e4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND0_1	0x000000e8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND0_1	0x000000ec
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND0_1	0x000000f0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND0_1	0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1			0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2			0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV_CNTL	0x00000100
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_STATUS			0x00000104
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT1		0x00000108
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT2		0x0000010c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV		0x00000110
+
 
 #endif /* HDMI_XML */
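Two things happen in this generated header: every 8x60 and 8960 PHY/PLL offset drops its old base (0x300, 0x400 and 0x500 respectively), and the 8996 PHY/QSERDES register maps are added. The rebasing follows from the PHY becoming a platform device of its own: offsets are now relative to the PHY's private mmio mapping rather than to the HDMI core block, so a write that used to land at core + 0x430 now goes through the new accessor against the PHY base. An illustrative use of a rebased define:

	/* pulse the 8960 PHY software reset via the PHY-relative offset */
	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12,
		       HDMI_8960_PHY_REG12_SW_RESET |
		       HDMI_8960_PHY_REG12_PWRDN_B);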
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index df232e2..a54d3bb 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -89,7 +89,7 @@
 	return NULL;
 }
 
-int hdmi_audio_update(struct hdmi *hdmi)
+int msm_hdmi_audio_update(struct hdmi *hdmi)
 {
 	struct hdmi_audio *audio = &hdmi->audio;
 	struct hdmi_audio_infoframe *info = &audio->infoframe;
@@ -232,7 +232,7 @@
 	return 0;
 }
 
-int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
 	uint32_t num_of_channels, uint32_t channel_allocation,
 	uint32_t level_shift, bool down_mix)
 {
@@ -252,10 +252,10 @@
 	audio->infoframe.level_shift_value = level_shift;
 	audio->infoframe.downmix_inhibit = down_mix;
 
-	return hdmi_audio_update(hdmi);
+	return msm_hdmi_audio_update(hdmi);
 }
 
-void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
 {
 	struct hdmi_audio *audio;
 
@@ -268,5 +268,5 @@
 		return;
 
 	audio->rate = rate;
-	hdmi_audio_update(hdmi);
+	msm_hdmi_audio_update(hdmi);
 }
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 92b69ae..bacbd5d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -23,11 +23,11 @@
 };
 #define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
 
-void hdmi_bridge_destroy(struct drm_bridge *bridge)
+void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
 {
 }
 
-static void power_on(struct drm_bridge *bridge)
+static void msm_hdmi_power_on(struct drm_bridge *bridge)
 {
 	struct drm_device *dev = bridge->dev;
 	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
@@ -86,7 +86,7 @@
 	}
 }
 
-static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
 {
 	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
 	struct hdmi *hdmi = hdmi_bridge->hdmi;
@@ -95,51 +95,51 @@
 	DBG("power up");
 
 	if (!hdmi->power_on) {
-		power_on(bridge);
+		msm_hdmi_phy_resource_enable(phy);
+		msm_hdmi_power_on(bridge);
 		hdmi->power_on = true;
-		hdmi_audio_update(hdmi);
+		msm_hdmi_audio_update(hdmi);
 	}
 
-	if (phy)
-		phy->funcs->powerup(phy, hdmi->pixclock);
+	msm_hdmi_phy_powerup(phy, hdmi->pixclock);
 
-	hdmi_set_mode(hdmi, true);
+	msm_hdmi_set_mode(hdmi, true);
 
 	if (hdmi->hdcp_ctrl)
-		hdmi_hdcp_on(hdmi->hdcp_ctrl);
+		msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
 }
 
-static void hdmi_bridge_enable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_enable(struct drm_bridge *bridge)
 {
 }
 
-static void hdmi_bridge_disable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_disable(struct drm_bridge *bridge)
 {
 }
 
-static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
 {
 	struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
 	struct hdmi *hdmi = hdmi_bridge->hdmi;
 	struct hdmi_phy *phy = hdmi->phy;
 
 	if (hdmi->hdcp_ctrl)
-		hdmi_hdcp_off(hdmi->hdcp_ctrl);
+		msm_hdmi_hdcp_off(hdmi->hdcp_ctrl);
 
 	DBG("power down");
-	hdmi_set_mode(hdmi, false);
+	msm_hdmi_set_mode(hdmi, false);
 
-	if (phy)
-		phy->funcs->powerdown(phy);
+	msm_hdmi_phy_powerdown(phy);
 
 	if (hdmi->power_on) {
 		power_off(bridge);
 		hdmi->power_on = false;
-		hdmi_audio_update(hdmi);
+		msm_hdmi_audio_update(hdmi);
+		msm_hdmi_phy_resource_disable(phy);
 	}
 }
 
-static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
+static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
 		 struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode)
 {
@@ -196,20 +196,20 @@
 	DBG("frame_ctrl=%08x", frame_ctrl);
 	hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
 
-	hdmi_audio_update(hdmi);
+	msm_hdmi_audio_update(hdmi);
 }
 
-static const struct drm_bridge_funcs hdmi_bridge_funcs = {
-		.pre_enable = hdmi_bridge_pre_enable,
-		.enable = hdmi_bridge_enable,
-		.disable = hdmi_bridge_disable,
-		.post_disable = hdmi_bridge_post_disable,
-		.mode_set = hdmi_bridge_mode_set,
+static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
+		.pre_enable = msm_hdmi_bridge_pre_enable,
+		.enable = msm_hdmi_bridge_enable,
+		.disable = msm_hdmi_bridge_disable,
+		.post_disable = msm_hdmi_bridge_post_disable,
+		.mode_set = msm_hdmi_bridge_mode_set,
 };
 
 
 /* initialize bridge */
-struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
+struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
 {
 	struct drm_bridge *bridge = NULL;
 	struct hdmi_bridge *hdmi_bridge;
@@ -225,7 +225,7 @@
 	hdmi_bridge->hdmi = hdmi;
 
 	bridge = &hdmi_bridge->base;
-	bridge->funcs = &hdmi_bridge_funcs;
+	bridge->funcs = &msm_hdmi_bridge_funcs;
 
 	ret = drm_bridge_attach(hdmi->dev, bridge);
 	if (ret)
@@ -235,7 +235,7 @@
 
 fail:
 	if (bridge)
-		hdmi_bridge_destroy(bridge);
+		msm_hdmi_bridge_destroy(bridge);
 
 	return ERR_PTR(ret);
 }
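The bridge callbacks above keep bring-up and teardown symmetric: the PHY's resources are the first thing enabled and the last thing released. Condensed from the two paths (conditionals and error handling omitted):

	/* pre_enable (bring-up)             post_disable (teardown)
	 *
	 * msm_hdmi_phy_resource_enable()    msm_hdmi_hdcp_off()
	 * msm_hdmi_power_on()               msm_hdmi_set_mode(false)
	 * msm_hdmi_audio_update()           msm_hdmi_phy_powerdown()
	 * msm_hdmi_phy_powerup()            power_off()
	 * msm_hdmi_set_mode(true)           msm_hdmi_audio_update()
	 * msm_hdmi_hdcp_on()                msm_hdmi_phy_resource_disable()
	 */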
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index a3b05ae..26129bf 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -28,7 +28,7 @@
 };
 #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
 
-static void hdmi_phy_reset(struct hdmi *hdmi)
+static void msm_hdmi_phy_reset(struct hdmi *hdmi)
 {
 	unsigned int val;
 
@@ -81,114 +81,54 @@
 {
 	struct device *dev = &hdmi->pdev->dev;
 	const struct hdmi_platform_config *config = hdmi->config;
-	int ret;
+	int ret, i;
 
 	if (on) {
-		if (config->ddc_clk_gpio != -1) {
-			ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
-			if (ret) {
-				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-					"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
-				goto error1;
+		for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
+			struct hdmi_gpio_data gpio = config->gpios[i];
+
+			if (gpio.num != -1) {
+				ret = gpio_request(gpio.num, gpio.label);
+				if (ret) {
+					dev_err(dev,
+						"'%s'(%d) gpio_request failed: %d\n",
+						gpio.label, gpio.num, ret);
+					goto err;
+				}
+
+				if (gpio.output) {
+					gpio_direction_output(gpio.num,
+							      gpio.value);
+				} else {
+					gpio_direction_input(gpio.num);
+					gpio_set_value_cansleep(gpio.num,
+								gpio.value);
+				}
 			}
-			gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 		}
 
-		if (config->ddc_data_gpio != -1) {
-			ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
-			if (ret) {
-				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-					"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
-				goto error2;
-			}
-			gpio_set_value_cansleep(config->ddc_data_gpio, 1);
-		}
-
-		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_HPD", config->hpd_gpio, ret);
-			goto error3;
-		}
-		gpio_direction_input(config->hpd_gpio);
-		gpio_set_value_cansleep(config->hpd_gpio, 1);
-
-		if (config->mux_en_gpio != -1) {
-			ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
-			if (ret) {
-				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-					"HDMI_MUX_EN", config->mux_en_gpio, ret);
-				goto error4;
-			}
-			gpio_set_value_cansleep(config->mux_en_gpio, 1);
-		}
-
-		if (config->mux_sel_gpio != -1) {
-			ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
-			if (ret) {
-				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-					"HDMI_MUX_SEL", config->mux_sel_gpio, ret);
-				goto error5;
-			}
-			gpio_set_value_cansleep(config->mux_sel_gpio, 0);
-		}
-
-		if (config->mux_lpm_gpio != -1) {
-			ret = gpio_request(config->mux_lpm_gpio,
-					"HDMI_MUX_LPM");
-			if (ret) {
-				dev_err(dev,
-					"'%s'(%d) gpio_request failed: %d\n",
-					"HDMI_MUX_LPM",
-					config->mux_lpm_gpio, ret);
-				goto error6;
-			}
-			gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
-		}
 		DBG("gpio on");
 	} else {
-		if (config->ddc_clk_gpio != -1)
-			gpio_free(config->ddc_clk_gpio);
+		for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
+			struct hdmi_gpio_data gpio = config->gpios[i];
 
-		if (config->ddc_data_gpio != -1)
-			gpio_free(config->ddc_data_gpio);
+			if (gpio.output) {
+				int value = gpio.value ? 0 : 1;
 
-		gpio_free(config->hpd_gpio);
+				gpio_set_value_cansleep(gpio.num, value);
+			}
 
-		if (config->mux_en_gpio != -1) {
-			gpio_set_value_cansleep(config->mux_en_gpio, 0);
-			gpio_free(config->mux_en_gpio);
-		}
+			gpio_free(gpio.num);
+		}
 
-		if (config->mux_sel_gpio != -1) {
-			gpio_set_value_cansleep(config->mux_sel_gpio, 1);
-			gpio_free(config->mux_sel_gpio);
-		}
-
-		if (config->mux_lpm_gpio != -1) {
-			gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
-			gpio_free(config->mux_lpm_gpio);
-		}
 		DBG("gpio off");
 	}
 
 	return 0;
+err:
+	while (i--)
+		gpio_free(config->gpios[i].num);
 
-error6:
-	if (config->mux_sel_gpio != -1)
-		gpio_free(config->mux_sel_gpio);
-error5:
-	if (config->mux_en_gpio != -1)
-		gpio_free(config->mux_en_gpio);
-error4:
-	gpio_free(config->hpd_gpio);
-error3:
-	if (config->ddc_data_gpio != -1)
-		gpio_free(config->ddc_data_gpio);
-error2:
-	if (config->ddc_clk_gpio != -1)
-		gpio_free(config->ddc_clk_gpio);
-error1:
 	return ret;
 }
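The six-deep error label ladder collapses into a single unwind loop over whatever was requested before the failure. One caveat worth noting: unused table slots carry num == -1 and are skipped at request time, but the unwind (and the power-off branch) calls gpio_free() unconditionally, which will at least warn on an invalid number. A hedged refinement, not part of this patch, would mirror the request-time check:

	err:
		while (i--)
			if (config->gpios[i].num != -1)
				gpio_free(config->gpios[i].num);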
 
@@ -239,9 +179,9 @@
 		}
 	}
 
-	hdmi_set_mode(hdmi, false);
-	hdmi_phy_reset(hdmi);
-	hdmi_set_mode(hdmi, true);
+	msm_hdmi_set_mode(hdmi, false);
+	msm_hdmi_phy_reset(hdmi);
+	msm_hdmi_set_mode(hdmi, true);
 
 	hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
 
@@ -278,7 +218,7 @@
 	/* Disable HPD interrupt */
 	hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
 
-	hdmi_set_mode(hdmi, false);
+	msm_hdmi_set_mode(hdmi, false);
 
 	for (i = 0; i < config->hpd_clk_cnt; i++)
 		clk_disable_unprepare(hdmi->hpd_clks[i]);
@@ -300,7 +240,7 @@
 }
 
 static void
-hotplug_work(struct work_struct *work)
+msm_hdmi_hotplug_work(struct work_struct *work)
 {
 	struct hdmi_connector *hdmi_connector =
 		container_of(work, struct hdmi_connector, hpd_work);
@@ -308,7 +248,7 @@
 	drm_helper_hpd_irq_event(connector->dev);
 }
 
-void hdmi_connector_irq(struct drm_connector *connector)
+void msm_hdmi_connector_irq(struct drm_connector *connector)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
 	struct hdmi *hdmi = hdmi_connector->hdmi;
@@ -345,10 +285,13 @@
 			connector_status_connected : connector_status_disconnected;
 }
 
+#define HPD_GPIO_INDEX	2
 static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
 {
 	const struct hdmi_platform_config *config = hdmi->config;
-	return gpio_get_value(config->hpd_gpio) ?
+	struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
+
+	return gpio_get_value(hpd_gpio.num) ?
 			connector_status_connected :
 			connector_status_disconnected;
 }
@@ -358,9 +301,18 @@
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
 	struct hdmi *hdmi = hdmi_connector->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
+	struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
 	enum drm_connector_status stat_gpio, stat_reg;
 	int retry = 20;
 
+	/*
+	 * Some platforms may not have an hpd gpio. Rely only on the status
+	 * provided by REG_HDMI_HPD_INT_STATUS in this case.
+	 */
+	if (hpd_gpio.num == -1)
+		return detect_reg(hdmi);
+
 	do {
 		stat_gpio = detect_gpio(hdmi);
 		stat_reg  = detect_reg(hdmi);
@@ -395,7 +347,7 @@
 	kfree(hdmi_connector);
 }
 
-static int hdmi_connector_get_modes(struct drm_connector *connector)
+static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
 	struct hdmi *hdmi = hdmi_connector->hdmi;
@@ -421,7 +373,7 @@
 	return ret;
 }
 
-static int hdmi_connector_mode_valid(struct drm_connector *connector,
+static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
@@ -451,7 +403,7 @@
 }
 
 static struct drm_encoder *
-hdmi_connector_best_encoder(struct drm_connector *connector)
+msm_hdmi_connector_best_encoder(struct drm_connector *connector)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
 	return hdmi_connector->hdmi->encoder;
@@ -467,14 +419,14 @@
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
-	.get_modes = hdmi_connector_get_modes,
-	.mode_valid = hdmi_connector_mode_valid,
-	.best_encoder = hdmi_connector_best_encoder,
+static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
+	.get_modes = msm_hdmi_connector_get_modes,
+	.mode_valid = msm_hdmi_connector_mode_valid,
+	.best_encoder = msm_hdmi_connector_best_encoder,
 };
 
 /* initialize connector */
-struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
+struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
 {
 	struct drm_connector *connector = NULL;
 	struct hdmi_connector *hdmi_connector;
@@ -487,13 +439,13 @@
 	}
 
 	hdmi_connector->hdmi = hdmi;
-	INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
+	INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
 
 	connector = &hdmi_connector->base;
 
 	drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
 			DRM_MODE_CONNECTOR_HDMIA);
-	drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
+	drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
 
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
 			DRM_CONNECTOR_POLL_DISCONNECT;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 1dc9c34..0baaaaa 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -84,7 +84,7 @@
 	bool max_dev_exceeded;
 };
 
-static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+static int msm_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
 	u8 *data, u16 data_len)
 {
 	int rc;
@@ -122,7 +122,7 @@
 
 #define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
 
-static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+static int msm_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
 	u8 *data, u16 data_len)
 {
 	int rc;
@@ -162,7 +162,7 @@
 	return rc;
 }
 
-static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+static int msm_hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
 	u32 *pdata, u32 count)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -202,7 +202,7 @@
 	return ret;
 }
 
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 reg_val, hdcp_int_status;
@@ -247,7 +247,7 @@
 	}
 }
 
-static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+static int msm_hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
 {
 	int rc;
 
@@ -264,7 +264,7 @@
 	return 0;
 }
 
-static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 
@@ -287,7 +287,7 @@
 	return 0;
 }
 
-static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 reg_val, failure, nack0;
@@ -337,7 +337,7 @@
 		reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
 		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
 
-		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
 
 		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
 		reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
@@ -350,7 +350,7 @@
 
 		/* If previous msleep is aborted, skip this msleep */
 		if (!rc)
-			rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+			rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
 
 		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
 		reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
@@ -362,7 +362,7 @@
 	return rc;
 }
 
-static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc;
 	u32 hdcp_ddc_status, ddc_hw_status;
@@ -394,7 +394,7 @@
 			return -ETIMEDOUT;
 		}
 
-		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
 		if (rc)
 			return rc;
 	} while (1);
@@ -402,7 +402,7 @@
 	return 0;
 }
 
-static void hdmi_hdcp_reauth_work(struct work_struct *work)
+static void msm_hdmi_hdcp_reauth_work(struct work_struct *work)
 {
 	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
 		struct hdmi_hdcp_ctrl, hdcp_reauth_work);
@@ -430,7 +430,7 @@
 		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
 
 	/* Wait for the DDC HW engine to be clean */
-	if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+	if (msm_hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
 		pr_info("%s: reauth work aborted\n", __func__);
 		return;
 	}
@@ -461,7 +461,7 @@
 	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
 }
 
-static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 link0_status;
@@ -470,7 +470,7 @@
 	int rc;
 
 	if (!hdcp_ctrl->aksv_valid) {
-		rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+		rc = msm_hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
 		if (rc) {
 			pr_err("%s: ASKV validation failed\n", __func__);
 			hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
@@ -538,12 +538,12 @@
 		DBG("An not ready after enabling HDCP");
 
 	/* Clear any DDC failures from previous tries before enabling HDCP */
-	rc = reset_hdcp_ddc_failures(hdcp_ctrl);
+	rc = msm_reset_hdcp_ddc_failures(hdcp_ctrl);
 
 	return rc;
 }
 
-static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static void msm_hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 reg_val;
@@ -561,7 +561,7 @@
 	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
 }
 
-static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static void msm_hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 reg_val;
@@ -596,7 +596,7 @@
  * Write An and AKSV to sink
  * Read BKSV from sink and write into HDCP engine
  */
-static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc;
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -621,7 +621,7 @@
 			return -ETIMEDOUT;
 		}
 
-		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
 		if (rc)
 			return rc;
 	} while (1);
@@ -643,7 +643,7 @@
 			return -ETIMEDOUT;
 		}
 
-		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
 		if (rc)
 			return rc;
 	} while (1);
@@ -651,7 +651,7 @@
 	return 0;
 }
 
-static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc = 0;
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -676,7 +676,7 @@
 	aksv[4] =  link0_aksv_1        & 0xFF;
 
 	/* Write An to offset 0x18 */
-	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+	rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
 		(u16)sizeof(link0_an));
 	if (rc) {
 		pr_err("%s:An write failed\n", __func__);
@@ -685,7 +685,7 @@
 	DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
 
 	/* Write AKSV to offset 0x10 */
-	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+	rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
 	if (rc) {
 		pr_err("%s:AKSV write failed\n", __func__);
 		return rc;
@@ -695,7 +695,7 @@
 	return 0;
 }
 
-static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc = 0;
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -703,7 +703,7 @@
 	u32 reg[2], data[2];
 
 	/* Read BKSV at offset 0x00 */
-	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+	rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
 	if (rc) {
 		pr_err("%s:BKSV read failed\n", __func__);
 		return rc;
@@ -728,19 +728,19 @@
 	data[0] = hdcp_ctrl->bksv_lsb;
 	reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
 	data[1] = hdcp_ctrl->bksv_msb;
-	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+	rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
 
 	return rc;
 }
 
-static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc = 0;
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 reg, data;
 	u8 bcaps;
 
-	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+	rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
 	if (rc) {
 		pr_err("%s:BCAPS read failed\n", __func__);
 		return rc;
@@ -753,26 +753,26 @@
 	/* Write BCAPS to the hardware */
 	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
 	data = (u32)bcaps;
-	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+	rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
 
 	return rc;
 }
 
-static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	unsigned long flags;
 	int rc;
 
 	/* Wait for AKSV key and An ready */
-	rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: wait key and an ready failed\n", __func__);
 		return rc;
 	}
 
 	/* Read BCAPS and send to HDCP engine */
-	rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: read bcaps error, abort\n", __func__);
 		return rc;
@@ -785,14 +785,14 @@
 	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
 
 	/* Send AKSV and An to sink */
-	rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_send_aksv_an(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s:An/Aksv write failed\n", __func__);
 		return rc;
 	}
 
 	/* Read BKSV and send to HDCP engine*/
-	rc = hdmi_hdcp_recv_bksv(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_recv_bksv(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s:BKSV Process failed\n", __func__);
 		return rc;
@@ -812,7 +812,7 @@
 }
 
 /* read R0' from sink and pass it to HDCP engine */
-static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	int rc = 0;
@@ -822,12 +822,12 @@
 	 * HDCP Compliance Test case 1A-01:
 	 * Wait here at least 100ms before reading R0'
 	 */
-	rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+	rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
 	if (rc)
 		return rc;
 
 	/* Read R0' at offset 0x08 */
-	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+	rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
 	if (rc) {
 		pr_err("%s:R0' read failed\n", __func__);
 		return rc;
@@ -842,14 +842,14 @@
 }
 
 /* Wait for authenticating result: R0/R0' are matched or not */
-static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 link0_status;
 	int rc;
 
 	/* wait for hdcp irq, 10 sec should be long enough */
-	rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
+	rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
 	if (!rc) {
 		pr_err("%s: Wait Auth IRQ timeout\n", __func__);
 		return -ETIMEDOUT;
@@ -869,7 +869,7 @@
 	return 0;
 }
 
-static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+static int msm_hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
 	u16 *pbstatus)
 {
 	int rc;
@@ -880,7 +880,7 @@
 	u8 buf[2];
 
 	/* Read BSTATUS at offset 0x41 */
-	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+	rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
 	if (rc) {
 		pr_err("%s: BSTATUS read failed\n", __func__);
 		goto error;
@@ -936,7 +936,7 @@
 	return rc;
 }
 
-static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+static int msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
 	struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc;
@@ -953,7 +953,7 @@
 	timeout_count = 100;
 	do {
 		/* Read BCAPS at offset 0x40 */
-		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+		rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
 		if (rc) {
 			pr_err("%s: BCAPS read failed\n", __func__);
 			return rc;
@@ -968,12 +968,12 @@
 			return -ETIMEDOUT;
 		}
 
-		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
 		if (rc)
 			return rc;
 	} while (1);
 
-	rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+	rc = msm_hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
 	if (rc) {
 		pr_err("%s: bstatus error\n", __func__);
 		return rc;
@@ -982,7 +982,7 @@
 	/* Write BSTATUS and BCAPS to HDCP registers */
 	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
 	data = bcaps | (bstatus << 8);
-	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+	rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
 	if (rc) {
 		pr_err("%s: BSTATUS write failed\n", __func__);
 		return rc;
@@ -997,7 +997,7 @@
  * transfer V' from sink to HDCP engine
  * reset SHA engine
  */
-static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	int rc = 0;
@@ -1016,7 +1016,7 @@
 
 	for (i = 0; i < size; i++) {
 		rd = &reg_data[i];
-		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+		rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
 			rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
 		if (rc) {
 			pr_err("%s: Read %s failed\n", __func__, rd->name);
@@ -1027,13 +1027,13 @@
 		reg[i] = reg_data[i].reg_id;
 	}
 
-	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+	rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
 
 error:
 	return rc;
 }
 
-static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc;
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -1041,7 +1041,7 @@
 
 	ksv_bytes = 5 * hdcp_ctrl->dev_count;
 
-	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+	rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
 		hdcp_ctrl->ksv_list, ksv_bytes);
 	if (rc)
 		pr_err("%s: KSV FIFO read failed\n", __func__);
@@ -1049,7 +1049,7 @@
 	return rc;
 }
 
-static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	u32 reg[2], data[2];
 	u32 rc = 0;
@@ -1059,12 +1059,12 @@
 	reg[1] = REG_HDMI_HDCP_SHA_CTRL;
 	data[1] = HDCP_REG_DISABLE;
 
-	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+	rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
 
 	return rc;
 }
 
-static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+static int msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(
 	struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc;
@@ -1081,7 +1081,7 @@
 	 */
 	timeout_count = 100;
 	do {
-		rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+		rc = msm_hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
 		if (!rc)
 			break;
 
@@ -1091,19 +1091,19 @@
 			return -ETIMEDOUT;
 		}
 
-		rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
 		if (rc)
 			return rc;
 	} while (1);
 
-	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_transfer_v_h(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: transfer V failed\n", __func__);
 		return rc;
 	}
 
 	/* reset SHA engine before writing the KSV FIFO */
-	rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: fail to reset sha engine\n", __func__);
 		return rc;
@@ -1120,7 +1120,7 @@
  * If the last byte is written, we need to poll for
  * HDCP_SHA_COMP_DONE to wait until the HW finishes
  */
-static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int i;
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -1169,7 +1169,7 @@
 
 		reg = REG_HDMI_HDCP_SHA_DATA;
 		data = reg_val;
-		rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+		rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
 
 		if (rc)
 			return rc;
@@ -1184,7 +1184,7 @@
 }
 
 /* write ksv fifo into HDCP engine */
-static int hdmi_hdcp_auth_part2_write_ksv_fifo(
+static int msm_hdmi_hdcp_auth_part2_write_ksv_fifo(
 	struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc;
@@ -1193,7 +1193,7 @@
 	hdcp_ctrl->ksv_fifo_w_index = 0;
 	timeout_count = 100;
 	do {
-		rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+		rc = msm_hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
 		if (!rc)
 			break;
 
@@ -1206,7 +1206,7 @@
 			return -ETIMEDOUT;
 		}
 
-		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
 		if (rc)
 			return rc;
 	} while (1);
@@ -1214,7 +1214,7 @@
 	return 0;
 }
 
-static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	int rc = 0;
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -1232,7 +1232,7 @@
 				return -ETIMEDOUT;
 		}
 
-		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
 		if (rc)
 			return rc;
 	} while (1);
@@ -1240,32 +1240,32 @@
 	return 0;
 }
 
-static void hdmi_hdcp_auth_work(struct work_struct *work)
+static void msm_hdmi_hdcp_auth_work(struct work_struct *work)
 {
 	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
 		struct hdmi_hdcp_ctrl, hdcp_auth_work);
 	int rc;
 
-	rc = hdmi_hdcp_auth_prepare(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_auth_prepare(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: auth prepare failed %d\n", __func__, rc);
 		goto end;
 	}
 
 	/* HDCP PartI */
-	rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: key exchange failed %d\n", __func__, rc);
 		goto end;
 	}
 
-	rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: receive r0 failed %d\n", __func__, rc);
 		goto end;
 	}
 
-	rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: verify r0 failed %d\n", __func__, rc);
 		goto end;
@@ -1275,25 +1275,25 @@
 		goto end;
 
 	/* HDCP PartII */
-	rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
 		goto end;
 	}
 
-	rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
 		goto end;
 	}
 
-	rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
 	if (rc) {
 		pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
 		goto end;
 	}
 
-	rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
+	rc = msm_hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
 	if (rc)
 		pr_err("%s: check v match failed %d\n", __func__, rc);
 
@@ -1304,13 +1304,13 @@
 		pr_info("%s: hdcp is not supported\n", __func__);
 	} else if (rc) {
 		pr_err("%s: hdcp authentication failed\n", __func__);
-		hdmi_hdcp_auth_fail(hdcp_ctrl);
+		msm_hdmi_hdcp_auth_fail(hdcp_ctrl);
 	} else {
-		hdmi_hdcp_auth_done(hdcp_ctrl);
+		msm_hdmi_hdcp_auth_done(hdcp_ctrl);
 	}
 }
 
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	u32 reg_val;
@@ -1335,7 +1335,7 @@
 	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
 }
 
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
 {
 	struct hdmi *hdmi = hdcp_ctrl->hdmi;
 	unsigned long flags;
@@ -1399,7 +1399,7 @@
 	DBG("HDCP: Off");
 }
 
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
 {
 	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
 
@@ -1413,8 +1413,8 @@
 	if (!hdcp_ctrl)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
-	INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work);
+	INIT_WORK(&hdcp_ctrl->hdcp_auth_work, msm_hdmi_hdcp_auth_work);
+	INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, msm_hdmi_hdcp_reauth_work);
 	init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
 	hdcp_ctrl->hdmi = hdmi;
 	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
@@ -1428,7 +1428,7 @@
 	return hdcp_ctrl;
 }
 
-void hdmi_hdcp_destroy(struct hdmi *hdmi)
+void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
 {
 	if (hdmi && hdmi->hdcp_ctrl) {
 		kfree(hdmi->hdcp_ctrl);
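
The retry loops above all funnel through msm_hdmi_hdcp_msleep(), whose body
is not part of these hunks. A minimal sketch of its assumed shape, an
abortable sleep on auth_event_queue that returns early when the IRQ path
raises the requested event bit (e.g. AUTH_ABORT_EV); the auth_event bitmask
field here is hypothetical:

  /*
   * Illustrative sketch only, not part of this diff. Sleep up to @ms
   * milliseconds, returning -ECANCELED if one of the @ev bits is raised
   * on auth_event_queue first (the auth_event field is an assumption).
   */
  static int example_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
  {
  	long rc;

  	rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
  				hdcp_ctrl->auth_event & ev,
  				msecs_to_jiffies(ms));
  	if (rc) {
  		/* woken by the event, not the timer: abort the sleep */
  		hdcp_ctrl->auth_event &= ~ev;
  		return -ECANCELED;
  	}

  	return 0;
  }
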
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index f4ab7f7..de9007e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -97,7 +97,7 @@
 	return hdmi_i2c->sw_done;
 }
 
-static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
+static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
 		struct i2c_msg *msgs, int num)
 {
 	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
@@ -216,17 +216,17 @@
 	return i;
 }
 
-static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
+static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter)
 {
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
-static const struct i2c_algorithm hdmi_i2c_algorithm = {
-	.master_xfer	= hdmi_i2c_xfer,
-	.functionality	= hdmi_i2c_func,
+static const struct i2c_algorithm msm_hdmi_i2c_algorithm = {
+	.master_xfer	= msm_hdmi_i2c_xfer,
+	.functionality	= msm_hdmi_i2c_func,
 };
 
-void hdmi_i2c_irq(struct i2c_adapter *i2c)
+void msm_hdmi_i2c_irq(struct i2c_adapter *i2c)
 {
 	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
 
@@ -234,14 +234,14 @@
 		wake_up_all(&hdmi_i2c->ddc_event);
 }
 
-void hdmi_i2c_destroy(struct i2c_adapter *i2c)
+void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c)
 {
 	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
 	i2c_del_adapter(i2c);
 	kfree(hdmi_i2c);
 }
 
-struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
+struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
 {
 	struct drm_device *dev = hdmi->dev;
 	struct hdmi_i2c_adapter *hdmi_i2c;
@@ -264,7 +264,7 @@
 	i2c->class = I2C_CLASS_DDC;
 	snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
 	i2c->dev.parent = &hdmi->pdev->dev;
-	i2c->algo = &hdmi_i2c_algorithm;
+	i2c->algo = &msm_hdmi_i2c_algorithm;
 
 	ret = i2c_add_adapter(i2c);
 	if (ret) {
@@ -276,6 +276,6 @@
 
 fail:
 	if (i2c)
-		hdmi_i2c_destroy(i2c);
+		msm_hdmi_i2c_destroy(i2c);
 	return ERR_PTR(ret);
 }
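
The adapter registered here behaves as an ordinary I2C bus, so DDC traffic
can go through the generic I2C core. A hypothetical consumer (not part of
the driver) reading the first 128-byte EDID block at the standard 0x50 DDC
address might look like this:

  #include <linux/i2c.h>

  /* Hypothetical sketch: fetch EDID block 0 over the DDC adapter. */
  static int example_read_edid_block(struct i2c_adapter *ddc, u8 *buf)
  {
  	u8 offset = 0;
  	struct i2c_msg msgs[] = {
  		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
  		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
  	};
  	int ret;

  	ret = i2c_transfer(ddc, msgs, ARRAY_SIZE(msgs));
  	if (ret < 0)
  		return ret;

  	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
  }
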
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
new file mode 100644
index 0000000..534ce5b
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_device.h>
+
+#include "hdmi.h"
+
+static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_cfg *cfg = phy->cfg;
+	struct device *dev = &phy->pdev->dev;
+	int i, ret;
+
+	phy->regs = devm_kzalloc(dev, sizeof(phy->regs[0]) * cfg->num_regs,
+				 GFP_KERNEL);
+	if (!phy->regs)
+		return -ENOMEM;
+
+	phy->clks = devm_kzalloc(dev, sizeof(phy->clks[0]) * cfg->num_clks,
+				 GFP_KERNEL);
+	if (!phy->clks)
+		return -ENOMEM;
+
+	for (i = 0; i < cfg->num_regs; i++) {
+		struct regulator *reg;
+
+		reg = devm_regulator_get(dev, cfg->reg_names[i]);
+		if (IS_ERR(reg)) {
+			ret = PTR_ERR(reg);
+			dev_err(dev, "failed to get phy regulator: %s (%d)\n",
+				cfg->reg_names[i], ret);
+			return ret;
+		}
+
+		phy->regs[i] = reg;
+	}
+
+	for (i = 0; i < cfg->num_clks; i++) {
+		struct clk *clk;
+
+		clk = devm_clk_get(dev, cfg->clk_names[i]);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(dev, "failed to get phy clock: %s (%d)\n",
+				cfg->clk_names[i], ret);
+			return ret;
+		}
+
+		phy->clks[i] = clk;
+	}
+
+	return 0;
+}
+
+int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_cfg *cfg = phy->cfg;
+	struct device *dev = &phy->pdev->dev;
+	int i, ret = 0;
+
+	pm_runtime_get_sync(dev);
+
+	for (i = 0; i < cfg->num_regs; i++) {
+		ret = regulator_enable(phy->regs[i]);
+		if (ret)
+			dev_err(dev, "failed to enable regulator: %s (%d)\n",
+				cfg->reg_names[i], ret);
+	}
+
+	for (i = 0; i < cfg->num_clks; i++) {
+		ret = clk_prepare_enable(phy->clks[i]);
+		if (ret)
+			dev_err(dev, "failed to enable clock: %s (%d)\n",
+				cfg->clk_names[i], ret);
+	}
+
+	return ret;
+}
+
+void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_cfg *cfg = phy->cfg;
+	struct device *dev = &phy->pdev->dev;
+	int i;
+
+	for (i = cfg->num_clks - 1; i >= 0; i--)
+		clk_disable_unprepare(phy->clks[i]);
+
+	for (i = cfg->num_regs - 1; i >= 0; i--)
+		regulator_disable(phy->regs[i]);
+
+	pm_runtime_put_sync(dev);
+}
+
+void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock)
+{
+	if (!phy || !phy->cfg->powerup)
+		return;
+
+	phy->cfg->powerup(phy, pixclock);
+}
+
+void msm_hdmi_phy_powerdown(struct hdmi_phy *phy)
+{
+	if (!phy || !phy->cfg->powerdown)
+		return;
+
+	phy->cfg->powerdown(phy);
+}
+
+static int msm_hdmi_phy_pll_init(struct platform_device *pdev,
+			     enum hdmi_phy_type type)
+{
+	int ret;
+
+	switch (type) {
+	case MSM_HDMI_PHY_8960:
+		ret = msm_hdmi_pll_8960_init(pdev);
+		break;
+	case MSM_HDMI_PHY_8996:
+		ret = msm_hdmi_pll_8996_init(pdev);
+		break;
+	/*
+	 * we don't have PLL support for these, don't report an error for now
+	 */
+	case MSM_HDMI_PHY_8x60:
+	case MSM_HDMI_PHY_8x74:
+	default:
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+static int msm_hdmi_phy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct hdmi_phy *phy;
+	int ret;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->cfg = (struct hdmi_phy_cfg *)of_device_get_match_data(dev);
+	if (!phy->cfg)
+		return -ENODEV;
+
+	phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
+	if (IS_ERR(phy->mmio)) {
+		dev_err(dev, "%s: failed to map phy base\n", __func__);
+		return -ENOMEM;
+	}
+
+	phy->pdev = pdev;
+
+	ret = msm_hdmi_phy_resource_init(phy);
+	if (ret)
+		return ret;
+
+	pm_runtime_enable(&pdev->dev);
+
+	ret = msm_hdmi_phy_resource_enable(phy);
+	if (ret)
+		return ret;
+
+	ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
+	if (ret) {
+		dev_err(dev, "couldn't init PLL\n");
+		msm_hdmi_phy_resource_disable(phy);
+		return ret;
+	}
+
+	msm_hdmi_phy_resource_disable(phy);
+
+	platform_set_drvdata(pdev, phy);
+
+	return 0;
+}
+
+static int msm_hdmi_phy_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static const struct of_device_id msm_hdmi_phy_dt_match[] = {
+	{ .compatible = "qcom,hdmi-phy-8660",
+	  .data = &msm_hdmi_phy_8x60_cfg },
+	{ .compatible = "qcom,hdmi-phy-8960",
+	  .data = &msm_hdmi_phy_8960_cfg },
+	{ .compatible = "qcom,hdmi-phy-8974",
+	  .data = &msm_hdmi_phy_8x74_cfg },
+	{ .compatible = "qcom,hdmi-phy-8084",
+	  .data = &msm_hdmi_phy_8x74_cfg },
+	{ .compatible = "qcom,hdmi-phy-8996",
+	  .data = &msm_hdmi_phy_8996_cfg },
+	{}
+};
+
+static struct platform_driver msm_hdmi_phy_platform_driver = {
+	.probe      = msm_hdmi_phy_probe,
+	.remove     = msm_hdmi_phy_remove,
+	.driver     = {
+		.name   = "msm_hdmi_phy",
+		.of_match_table = msm_hdmi_phy_dt_match,
+	},
+};
+
+void __init msm_hdmi_phy_driver_register(void)
+{
+	platform_driver_register(&msm_hdmi_phy_platform_driver);
+}
+
+void __exit msm_hdmi_phy_driver_unregister(void)
+{
+	platform_driver_unregister(&msm_hdmi_phy_platform_driver);
+}
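
The PHY now probes as a standalone platform device: the per-SoC cfg is
resolved from the compatible string via of_device_get_match_data(), and the
struct hdmi_phy is published with platform_set_drvdata(). A consumer such as
the HDMI core would typically recover it from a device-tree phandle; a
sketch assuming a "phy" property (the property name is illustrative):

  #include <linux/of.h>
  #include <linux/of_platform.h>

  /* Sketch: look up the PHY's drvdata from a DT phandle. */
  static struct hdmi_phy *example_get_phy(struct device_node *hdmi_node)
  {
  	struct device_node *phy_node;
  	struct platform_device *phy_pdev;

  	phy_node = of_parse_phandle(hdmi_node, "phy", 0);
  	if (!phy_node)
  		return ERR_PTR(-ENXIO);

  	phy_pdev = of_find_device_by_node(phy_node);
  	of_node_put(phy_node);
  	if (!phy_pdev)
  		return ERR_PTR(-EPROBE_DEFER);

  	/* set by msm_hdmi_phy_probe() above */
  	return platform_get_drvdata(phy_pdev);
  }
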
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 3a01cb5..e6ee6b7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,495 +15,48 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifdef CONFIG_COMMON_CLK
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#endif
-
 #include "hdmi.h"
 
-struct hdmi_phy_8960 {
-	struct hdmi_phy base;
-	struct hdmi *hdmi;
-#ifdef CONFIG_COMMON_CLK
-	struct clk_hw pll_hw;
-	struct clk *pll;
-	unsigned long pixclk;
-#endif
-};
-#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
-
-#ifdef CONFIG_COMMON_CLK
-#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
-
-/*
- * HDMI PLL:
- *
- * To get the parent clock setup properly, we need to plug in hdmi pll
- * configuration into common-clock-framework.
- */
-
-struct pll_rate {
-	unsigned long rate;
-	struct {
-		uint32_t val;
-		uint32_t reg;
-	} conf[32];
-};
-
-/* NOTE: keep sorted highest freq to lowest: */
-static const struct pll_rate freqtbl[] = {
-	{ 154000000, {
-		{ 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
-		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
-		{ 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
-		{ 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
-		{ 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
-		{ 0, 0 } }
-	},
-	/* 1080p60/1080p50 case */
-	{ 148500000, {
-		{ 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
-		{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG  },
-		{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
-		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
-		{ 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
-		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
-		{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
-		{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3      },
-		{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0  },
-		{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1  },
-		{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2  },
-		{ 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
-		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
-		{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7   },
-		{ 0, 0 } }
-	},
-	{ 108000000, {
-		{ 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
-		{ 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
-		{ 0, 0 } }
-	},
-	/* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
-	{ 74250000, {
-		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
-		{ 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0, 0 } }
-	},
-	{ 74176000, {
-		{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
-		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
-		{ 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
-		{ 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
-		{ 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
-		{ 0, 0 } }
-	},
-	{ 65000000, {
-		{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
-		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
-		{ 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
-		{ 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
-		{ 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
-		{ 0, 0 } }
-	},
-	/* 480p60/480i60 */
-	{ 27030000, {
-		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
-		{ 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
-		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
-		{ 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
-		{ 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
-		{ 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
-		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
-		{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7   },
-		{ 0, 0 } }
-	},
-	/* 576p50/576i50 */
-	{ 27000000, {
-		{ 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
-		{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG  },
-		{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
-		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
-		{ 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
-		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
-		{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
-		{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3      },
-		{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0  },
-		{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1  },
-		{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2  },
-		{ 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
-		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
-		{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7   },
-		{ 0, 0 } }
-	},
-	/* 640x480p60 */
-	{ 25200000, {
-		{ 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
-		{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
-		{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
-		{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG  },
-		{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
-		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
-		{ 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
-		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
-		{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
-		{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1      },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2      },
-		{ 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3      },
-		{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0  },
-		{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1  },
-		{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2  },
-		{ 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
-		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
-		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
-		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
-		{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6   },
-		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7   },
-		{ 0, 0 } }
-	},
-};
-
-static int hdmi_pll_enable(struct clk_hw *hw)
-{
-	struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
-	struct hdmi *hdmi = phy_8960->hdmi;
-	int timeout_count, pll_lock_retry = 10;
-	unsigned int val;
-
-	DBG("");
-
-	/* Assert PLL S/W reset */
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
-
-	/* Wait for a short time before de-asserting
-	 * to allow the hardware to complete its job.
-	 * This much of delay should be fine for hardware
-	 * to assert and de-assert.
-	 */
-	udelay(10);
-
-	/* De-assert PLL S/W reset */
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
-
-	val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
-	val |= HDMI_8960_PHY_REG12_SW_RESET;
-	/* Assert PHY S/W reset */
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
-	val &= ~HDMI_8960_PHY_REG12_SW_RESET;
-	/* Wait for a short time before de-asserting
-	   to allow the hardware to complete its job.
-	   This much of delay should be fine for hardware
-	   to assert and de-assert. */
-	udelay(10);
-	/* De-assert PHY S/W reset */
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2,  0x3f);
-
-	val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
-	val |= HDMI_8960_PHY_REG12_PWRDN_B;
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
-	/* Wait 10 us for enabling global power for PHY */
-	mb();
-	udelay(10);
-
-	val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
-	val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
-	val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80);
-
-	timeout_count = 1000;
-	while (--pll_lock_retry > 0) {
-
-		/* are we there yet? */
-		val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0);
-		if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
-			break;
-
-		udelay(1);
-
-		if (--timeout_count > 0)
-			continue;
-
-		/*
-		 * PLL has still not locked.
-		 * Do a software reset and try again
-		 * Assert PLL S/W reset first
-		 */
-		hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
-		udelay(10);
-		hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
-
-		/*
-		 * Wait for a short duration for the PLL calibration
-		 * before checking if the PLL gets locked
-		 */
-		udelay(350);
-
-		timeout_count = 1000;
-	}
-
-	return 0;
-}
-
-static void hdmi_pll_disable(struct clk_hw *hw)
-{
-	struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
-	struct hdmi *hdmi = phy_8960->hdmi;
-	unsigned int val;
-
-	DBG("");
-
-	val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
-	val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
-
-	val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
-	val |= HDMI_8960_PHY_REG12_SW_RESET;
-	val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
-	/* Make sure HDMI PHY/PLL are powered down */
-	mb();
-}
-
-static const struct pll_rate *find_rate(unsigned long rate)
-{
-	int i;
-	for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
-		if (rate > freqtbl[i].rate)
-			return &freqtbl[i-1];
-	return &freqtbl[i-1];
-}
-
-static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
-				unsigned long parent_rate)
-{
-	struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
-	return phy_8960->pixclk;
-}
-
-static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *parent_rate)
-{
-	const struct pll_rate *pll_rate = find_rate(rate);
-	return pll_rate->rate;
-}
-
-static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long parent_rate)
-{
-	struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
-	struct hdmi *hdmi = phy_8960->hdmi;
-	const struct pll_rate *pll_rate = find_rate(rate);
-	int i;
-
-	DBG("rate=%lu", rate);
-
-	for (i = 0; pll_rate->conf[i].reg; i++)
-		hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val);
-
-	phy_8960->pixclk = rate;
-
-	return 0;
-}
-
-
-static const struct clk_ops hdmi_pll_ops = {
-	.enable = hdmi_pll_enable,
-	.disable = hdmi_pll_disable,
-	.recalc_rate = hdmi_pll_recalc_rate,
-	.round_rate = hdmi_pll_round_rate,
-	.set_rate = hdmi_pll_set_rate,
-};
-
-static const char *hdmi_pll_parents[] = {
-	"pxo",
-};
-
-static struct clk_init_data pll_init = {
-	.name = "hdmi_pll",
-	.ops = &hdmi_pll_ops,
-	.parent_names = hdmi_pll_parents,
-	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
-};
-#endif
-
-/*
- * HDMI Phy:
- */
-
-static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
-	kfree(phy_8960);
-}
-
 static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
-		unsigned long int pixclock)
+				  unsigned long int pixclock)
 {
-	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
-	struct hdmi *hdmi = phy_8960->hdmi;
-
 	DBG("pixclock: %lu", pixclock);
 
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG0, 0x1b);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG1, 0xf2);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG4, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG5, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG6, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG7, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG8, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG9, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG10, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG11, 0x00);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG3, 0x20);
 }
 
 static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
 {
-	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
-	struct hdmi *hdmi = phy_8960->hdmi;
-
 	DBG("");
 
-	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x7f);
 }
 
-static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
-		.destroy = hdmi_phy_8960_destroy,
-		.powerup = hdmi_phy_8960_powerup,
-		.powerdown = hdmi_phy_8960_powerdown,
+static const char * const hdmi_phy_8960_reg_names[] = {
+	"core-vdda",
 };
 
-struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
-{
-	struct hdmi_phy_8960 *phy_8960;
-	struct hdmi_phy *phy = NULL;
-	int ret;
-#ifdef CONFIG_COMMON_CLK
-	int i;
+static const char * const hdmi_phy_8960_clk_names[] = {
+	"slave_iface_clk",
+};
 
-	/* sanity check: */
-	for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
-		if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
-			return ERR_PTR(-EINVAL);
-#endif
-
-	phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
-	if (!phy_8960) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	phy = &phy_8960->base;
-
-	phy->funcs = &hdmi_phy_8960_funcs;
-
-	phy_8960->hdmi = hdmi;
-
-#ifdef CONFIG_COMMON_CLK
-	phy_8960->pll_hw.init = &pll_init;
-	phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw);
-	if (IS_ERR(phy_8960->pll)) {
-		ret = PTR_ERR(phy_8960->pll);
-		phy_8960->pll = NULL;
-		goto fail;
-	}
-#endif
-
-	return phy;
-
-fail:
-	if (phy)
-		hdmi_phy_8960_destroy(phy);
-	return ERR_PTR(ret);
-}
+const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = {
+	.type = MSM_HDMI_PHY_8960,
+	.powerup = hdmi_phy_8960_powerup,
+	.powerdown = hdmi_phy_8960_powerdown,
+	.reg_names = hdmi_phy_8960_reg_names,
+	.num_regs = ARRAY_SIZE(hdmi_phy_8960_reg_names),
+	.clk_names = hdmi_phy_8960_clk_names,
+	.num_clks = ARRAY_SIZE(hdmi_phy_8960_clk_names),
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
new file mode 100644
index 0000000..aa94a55
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -0,0 +1,766 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "hdmi.h"
+
+#define HDMI_VCO_MAX_FREQ			12000000000UL
+#define HDMI_VCO_MIN_FREQ			8000000000UL
+
+#define HDMI_PCLK_MAX_FREQ			600000000
+#define HDMI_PCLK_MIN_FREQ			25000000
+
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD	3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD		1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD		750000000UL
+#define HDMI_CORECLK_DIV			5
+#define HDMI_DEFAULT_REF_CLOCK			19200000
+#define HDMI_PLL_CMP_CNT			1024
+
+#define HDMI_PLL_POLL_MAX_READS			100
+#define HDMI_PLL_POLL_TIMEOUT_US		150
+
+#define HDMI_NUM_TX_CHANNEL			4
+
+struct hdmi_pll_8996 {
+	struct platform_device *pdev;
+	struct clk_hw clk_hw;
+
+	/* pll mmio base */
+	void __iomem *mmio_qserdes_com;
+	/* tx channel base */
+	void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL];
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8996, clk_hw)
+
+struct hdmi_8996_phy_pll_reg_cfg {
+	u32 tx_lx_lane_mode[HDMI_NUM_TX_CHANNEL];
+	u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL];
+	u32 com_svs_mode_clk_sel;
+	u32 com_hsclk_sel;
+	u32 com_pll_cctrl_mode0;
+	u32 com_pll_rctrl_mode0;
+	u32 com_cp_ctrl_mode0;
+	u32 com_dec_start_mode0;
+	u32 com_div_frac_start1_mode0;
+	u32 com_div_frac_start2_mode0;
+	u32 com_div_frac_start3_mode0;
+	u32 com_integloop_gain0_mode0;
+	u32 com_integloop_gain1_mode0;
+	u32 com_lock_cmp_en;
+	u32 com_lock_cmp1_mode0;
+	u32 com_lock_cmp2_mode0;
+	u32 com_lock_cmp3_mode0;
+	u32 com_core_clk_en;
+	u32 com_coreclk_div;
+	u32 com_vco_tune_ctrl;
+
+	u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL];
+	u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL];
+	u32 tx_lx_vmode_ctrl1[HDMI_NUM_TX_CHANNEL];
+	u32 tx_lx_vmode_ctrl2[HDMI_NUM_TX_CHANNEL];
+	u32 tx_lx_res_code_lane_tx[HDMI_NUM_TX_CHANNEL];
+	u32 tx_lx_hp_pd_enables[HDMI_NUM_TX_CHANNEL];
+
+	u32 phy_mode;
+};
+
+struct hdmi_8996_post_divider {
+	u64 vco_freq;
+	int hsclk_divsel;
+	int vco_ratio;
+	int tx_band_sel;
+	int half_rate_mode;
+};
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8996 *pll)
+{
+	return platform_get_drvdata(pll->pdev);
+}
+
+static inline void hdmi_pll_write(struct hdmi_pll_8996 *pll, int offset,
+				  u32 data)
+{
+	msm_writel(data, pll->mmio_qserdes_com + offset);
+}
+
+static inline u32 hdmi_pll_read(struct hdmi_pll_8996 *pll, int offset)
+{
+	return msm_readl(pll->mmio_qserdes_com + offset);
+}
+
+static inline void hdmi_tx_chan_write(struct hdmi_pll_8996 *pll, int channel,
+				      int offset, int data)
+{
+	msm_writel(data, pll->mmio_qserdes_tx[channel] + offset);
+}
+
+static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
+				 bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		return (11000000 / (ref_clk / 20));
+
+	return 0x23;
+}
+
+static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		return 0x16;
+
+	return 0x10;
+}
+
+static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		return 0x28;
+
+	return 0x1;
+}
+
+static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
+					 bool gen_ssc)
+{
+	int digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+	u64 base;
+
+	if ((frac_start != 0) || gen_ssc)
+		base = (64 * ref_clk) / HDMI_DEFAULT_REF_CLOCK;
+	else
+		base = (1022 * ref_clk) / 100;
+
+	base <<= digclk_divsel;
+
+	return (base <= 2046 ? base : 2046);
+}
+
+static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
+{
+	u64 dividend = HDMI_PLL_CMP_CNT * fdata;
+	u32 divisor = ref_clk * 10;
+	u32 rem;
+
+	rem = do_div(dividend, divisor);
+	if (rem > (divisor >> 1))
+		dividend++;
+
+	return dividend - 1;
+}
+
+static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
+{
+	u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
+
+	do_div(fdata, HDMI_PLL_CMP_CNT);
+
+	return fdata;
+}
+
+static int pll_get_post_div(struct hdmi_8996_post_divider *pd, u64 bclk)
+{
+	int ratio[] = { 2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35 };
+	int hs_divsel[] = { 0, 4, 8, 12, 1, 5, 2, 9, 3, 13, 10, 7, 14, 11, 15 };
+	int tx_band_sel[] = { 0, 1, 2, 3 };
+	u64 vco_freq[60];
+	u64 vco, vco_optimal;
+	int half_rate_mode = 0;
+	int vco_optimal_index, vco_freq_index;
+	int i, j;
+
+retry:
+	vco_optimal = HDMI_VCO_MAX_FREQ;
+	vco_optimal_index = -1;
+	vco_freq_index = 0;
+	for (i = 0; i < 15; i++) {
+		for (j = 0; j < 4; j++) {
+			u32 ratio_mult = ratio[i] << tx_band_sel[j];
+
+			vco = bclk >> half_rate_mode;
+			vco *= ratio_mult;
+			vco_freq[vco_freq_index++] = vco;
+		}
+	}
+
+	for (i = 0; i < 60; i++) {
+		u64 vco_tmp = vco_freq[i];
+
+		if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
+		    (vco_tmp <= vco_optimal)) {
+			vco_optimal = vco_tmp;
+			vco_optimal_index = i;
+		}
+	}
+
+	if (vco_optimal_index == -1) {
+		if (!half_rate_mode) {
+			half_rate_mode = 1;
+			goto retry;
+		}
+	} else {
+		pd->vco_freq = vco_optimal;
+		pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
+		pd->vco_ratio = ratio[vco_optimal_index / 4];
+		pd->hsclk_divsel = hs_divsel[vco_optimal_index / 4];
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk,
+			 struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	struct hdmi_8996_post_divider pd;
+	u64 bclk;
+	u64 tmds_clk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 fdata;
+	u32 pll_divisor;
+	u32 rem;
+	u32 cpctrl;
+	u32 rctrl;
+	u32 cctrl;
+	u32 integloop_gain;
+	u32 pll_cmp;
+	int i, ret;
+
+	/* bit clk = 10 * pix_clk */
+	bclk = ((u64)pix_clk) * 10;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
+	ret = pll_get_post_div(&pd, bclk);
+	if (ret)
+		return ret;
+
+	dec_start = pd.vco_freq;
+	pll_divisor = 4 * ref_clk;
+	do_div(dec_start, pll_divisor);
+
+	frac_start = pd.vco_freq * (1 << 20);
+
+	rem = do_div(frac_start, pll_divisor);
+	frac_start -= dec_start * (1 << 20);
+	if (rem > (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = pll_get_cpctrl(frac_start, ref_clk, false);
+	rctrl = pll_get_rctrl(frac_start, false);
+	cctrl = pll_get_cctrl(frac_start, false);
+	integloop_gain = pll_get_integloop_gain(frac_start, bclk,
+						ref_clk, false);
+
+	fdata = pd.vco_freq;
+	do_div(fdata, pd.vco_ratio);
+
+	pll_cmp = pll_get_pll_cmp(fdata, ref_clk);
+
+	DBG("VCO freq: %llu", pd.vco_freq);
+	DBG("fdata: %llu", fdata);
+	DBG("pix_clk: %lu", pix_clk);
+	DBG("tmds clk: %llu", tmds_clk);
+	DBG("HSCLK_SEL: %d", pd.hsclk_divsel);
+	DBG("DEC_START: %llu", dec_start);
+	DBG("DIV_FRAC_START: %llu", frac_start);
+	DBG("PLL_CPCTRL: %u", cpctrl);
+	DBG("PLL_RCTRL: %u", rctrl);
+	DBG("PLL_CCTRL: %u", cctrl);
+	DBG("INTEGLOOP_GAIN: %u", integloop_gain);
+	DBG("TX_BAND: %d", pd.tx_band_sel);
+	DBG("PLL_CMP: %u", pll_cmp);
+
+	/* Convert these values to register specific values */
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xff);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_lock_cmp_en = 0x0;
+	cfg->com_core_clk_en = 0x2c;
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	cfg->tx_lx_lane_mode[0] =
+		cfg->tx_lx_lane_mode[2] = 0x43;
+
+	cfg->tx_lx_hp_pd_enables[0] =
+		cfg->tx_lx_hp_pd_enables[1] =
+		cfg->tx_lx_hp_pd_enables[2] = 0x0c;
+	cfg->tx_lx_hp_pd_enables[3] = 0x3;
+
+	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+		cfg->tx_lx_tx_band[i] = pd.tx_band_sel + 4;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_lx_tx_drv_lvl[0] =
+			cfg->tx_lx_tx_drv_lvl[1] =
+			cfg->tx_lx_tx_drv_lvl[2] = 0x25;
+		cfg->tx_lx_tx_drv_lvl[3] = 0x22;
+
+		cfg->tx_lx_tx_emp_post1_lvl[0] =
+			cfg->tx_lx_tx_emp_post1_lvl[1] =
+			cfg->tx_lx_tx_emp_post1_lvl[2] = 0x23;
+		cfg->tx_lx_tx_emp_post1_lvl[3] = 0x27;
+
+		cfg->tx_lx_vmode_ctrl1[0] =
+			cfg->tx_lx_vmode_ctrl1[1] =
+			cfg->tx_lx_vmode_ctrl1[2] =
+			cfg->tx_lx_vmode_ctrl1[3] = 0x00;
+
+		cfg->tx_lx_vmode_ctrl2[0] =
+			cfg->tx_lx_vmode_ctrl2[1] =
+			cfg->tx_lx_vmode_ctrl2[2] = 0x0D;
+
+		cfg->tx_lx_vmode_ctrl2[3] = 0x00;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+			cfg->tx_lx_tx_drv_lvl[i] = 0x25;
+			cfg->tx_lx_tx_emp_post1_lvl[i] = 0x23;
+			cfg->tx_lx_vmode_ctrl1[i] = 0x00;
+		}
+
+		cfg->tx_lx_vmode_ctrl2[0] =
+			cfg->tx_lx_vmode_ctrl2[1] =
+			cfg->tx_lx_vmode_ctrl2[2] = 0x0D;
+		cfg->tx_lx_vmode_ctrl2[3] = 0x00;
+	} else {
+		for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+			cfg->tx_lx_tx_drv_lvl[i] = 0x20;
+			cfg->tx_lx_tx_emp_post1_lvl[i] = 0x20;
+			cfg->tx_lx_vmode_ctrl1[i] = 0x00;
+			cfg->tx_lx_vmode_ctrl2[i] = 0x0E;
+		}
+	}
+
+	DBG("com_svs_mode_clk_sel = 0x%x", cfg->com_svs_mode_clk_sel);
+	DBG("com_hsclk_sel = 0x%x", cfg->com_hsclk_sel);
+	DBG("com_lock_cmp_en = 0x%x", cfg->com_lock_cmp_en);
+	DBG("com_pll_cctrl_mode0 = 0x%x", cfg->com_pll_cctrl_mode0);
+	DBG("com_pll_rctrl_mode0 = 0x%x", cfg->com_pll_rctrl_mode0);
+	DBG("com_cp_ctrl_mode0 = 0x%x", cfg->com_cp_ctrl_mode0);
+	DBG("com_dec_start_mode0 = 0x%x", cfg->com_dec_start_mode0);
+	DBG("com_div_frac_start1_mode0 = 0x%x", cfg->com_div_frac_start1_mode0);
+	DBG("com_div_frac_start2_mode0 = 0x%x", cfg->com_div_frac_start2_mode0);
+	DBG("com_div_frac_start3_mode0 = 0x%x", cfg->com_div_frac_start3_mode0);
+	DBG("com_integloop_gain0_mode0 = 0x%x", cfg->com_integloop_gain0_mode0);
+	DBG("com_integloop_gain1_mode0 = 0x%x", cfg->com_integloop_gain1_mode0);
+	DBG("com_lock_cmp1_mode0 = 0x%x", cfg->com_lock_cmp1_mode0);
+	DBG("com_lock_cmp2_mode0 = 0x%x", cfg->com_lock_cmp2_mode0);
+	DBG("com_lock_cmp3_mode0 = 0x%x", cfg->com_lock_cmp3_mode0);
+	DBG("com_core_clk_en = 0x%x", cfg->com_core_clk_en);
+	DBG("com_coreclk_div = 0x%x", cfg->com_coreclk_div);
+	DBG("phy_mode = 0x%x", cfg->phy_mode);
+
+	DBG("tx_l0_lane_mode = 0x%x", cfg->tx_lx_lane_mode[0]);
+	DBG("tx_l2_lane_mode = 0x%x", cfg->tx_lx_lane_mode[2]);
+
+	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+		DBG("tx_l%d_tx_band = 0x%x", i, cfg->tx_lx_tx_band[i]);
+		DBG("tx_l%d_tx_drv_lvl = 0x%x", i, cfg->tx_lx_tx_drv_lvl[i]);
+		DBG("tx_l%d_tx_emp_post1_lvl = 0x%x", i,
+		    cfg->tx_lx_tx_emp_post1_lvl[i]);
+		DBG("tx_l%d_vmode_ctrl1 = 0x%x", i, cfg->tx_lx_vmode_ctrl1[i]);
+		DBG("tx_l%d_vmode_ctrl2 = 0x%x", i, cfg->tx_lx_vmode_ctrl2[i]);
+	}
+
+	return 0;
+}
+
+static int hdmi_8996_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate,
+				      unsigned long parent_rate)
+{
+	struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+	struct hdmi_phy *phy = pll_get_phy(pll);
+	struct hdmi_8996_phy_pll_reg_cfg cfg;
+	int i, ret;
+
+	memset(&cfg, 0x00, sizeof(cfg));
+
+	ret = pll_calculate(rate, parent_rate, &cfg);
+	if (ret) {
+		DRM_ERROR("PLL calculation failed\n");
+		return ret;
+	}
+
+	/* Initially shut down PHY */
+	DBG("Disabling PHY");
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x0);
+	udelay(500);
+
+	/* Power up sequence */
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x04);
+
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20);
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL, 0x0F);
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL, 0x0F);
+
+	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE,
+				   0x03);
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND,
+				   cfg.tx_lx_tx_band[i]);
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN,
+				   0x03);
+	}
+
+	hdmi_tx_chan_write(pll, 0, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE,
+			   cfg.tx_lx_lane_mode[0]);
+	hdmi_tx_chan_write(pll, 2, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE,
+			   cfg.tx_lx_lane_mode[2]);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E);
+
+	/* Bypass VCO calibration */
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL,
+		       cfg.com_svs_mode_clk_sel);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_TRIM, 0x0F);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_IVCO, 0x0F);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL,
+		       cfg.com_vco_tune_ctrl);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x06);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_SELECT, 0x30);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL,
+		       cfg.com_hsclk_sel);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN,
+		       cfg.com_lock_cmp_en);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0,
+		       cfg.com_pll_cctrl_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0,
+		       cfg.com_pll_rctrl_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0,
+		       cfg.com_cp_ctrl_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+		       cfg.com_integloop_gain0_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+		       cfg.com_integloop_gain1_mode0);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0,
+		       cfg.com_lock_cmp1_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0,
+		       cfg.com_lock_cmp2_mode0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0,
+		       cfg.com_lock_cmp3_mode0);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN,
+		       cfg.com_core_clk_en);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV,
+		       cfg.com_coreclk_div);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG, 0x02);
+
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+
+	/* TX lanes setup (TX 0/1/2/3) */
+	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL,
+				   cfg.tx_lx_tx_drv_lvl[i]);
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL,
+				   cfg.tx_lx_tx_emp_post1_lvl[i]);
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1,
+				   cfg.tx_lx_vmode_ctrl1[i]);
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2,
+				   cfg.tx_lx_vmode_ctrl2[i]);
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET,
+				   0x00);
+		hdmi_tx_chan_write(pll, i,
+			REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET,
+			0x00);
+		hdmi_tx_chan_write(pll, i,
+			REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN,
+			0x03);
+		hdmi_tx_chan_write(pll, i,
+			REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN,
+			0x40);
+		hdmi_tx_chan_write(pll, i,
+				   REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES,
+				   cfg.tx_lx_hp_pd_enables[i]);
+	}
+
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_MODE, cfg.phy_mode);
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1F);
+
+	/*
+	 * Ensure that vco configuration gets flushed to hardware before
+	 * enabling the PLL
+	 */
+	wmb();
+
+	return 0;
+}
+
+static int hdmi_8996_phy_ready_status(struct hdmi_phy *phy)
+{
+	u32 nb_tries = HDMI_PLL_POLL_MAX_READS;
+	unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+	u32 status;
+	int phy_ready = 0;
+
+	DBG("Waiting for PHY ready");
+
+	while (nb_tries--) {
+		status = hdmi_phy_read(phy, REG_HDMI_8996_PHY_STATUS);
+		phy_ready = status & BIT(0);
+
+		if (phy_ready)
+			break;
+
+		udelay(timeout);
+	}
+
+	DBG("PHY is %sready", phy_ready ? "" : "*not* ");
+
+	return phy_ready;
+}
+
+static int hdmi_8996_pll_lock_status(struct hdmi_pll_8996 *pll)
+{
+	u32 status;
+	int nb_tries = HDMI_PLL_POLL_MAX_READS;
+	unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+	int pll_locked = 0;
+
+	DBG("Waiting for PLL lock");
+
+	while (nb_tries--) {
+		status = hdmi_pll_read(pll,
+				       REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS);
+		pll_locked = status & BIT(0);
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout);
+	}
+
+	DBG("HDMI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
+static int hdmi_8996_pll_prepare(struct clk_hw *hw)
+{
+	struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+	struct hdmi_phy *phy = pll_get_phy(pll);
+	int i, ret = 0;
+
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x1);
+	udelay(100);
+
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19);
+	udelay(100);
+
+	ret = hdmi_8996_pll_lock_status(pll);
+	if (!ret)
+		return -ETIMEDOUT;
+
+	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+		hdmi_tx_chan_write(pll, i,
+			REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+			0x6F);
+
+	/* Disable SSC */
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER1, 0x0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER2, 0x0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1, 0x0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2, 0x0);
+	hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER, 0x2);
+
+	ret = hdmi_8996_phy_ready_status(phy);
+	if (!ret)
+		return -ETIMEDOUT;
+
+	/* Restart the retiming buffer */
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x18);
+	udelay(1);
+	hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19);
+
+	return 0;
+}
+
+static long hdmi_8996_pll_round_rate(struct clk_hw *hw,
+				     unsigned long rate,
+				     unsigned long *parent_rate)
+{
+	if (rate < HDMI_PCLK_MIN_FREQ)
+		return HDMI_PCLK_MIN_FREQ;
+	else if (rate > HDMI_PCLK_MAX_FREQ)
+		return HDMI_PCLK_MAX_FREQ;
+	else
+		return rate;
+}
+
+static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw,
+					       unsigned long parent_rate)
+{
+	struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+	u64 fdata;
+	u32 cmp1, cmp2, cmp3, pll_cmp;
+
+	cmp1 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0);
+	cmp2 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0);
+	cmp3 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0);
+
+	pll_cmp = cmp1 | (cmp2 << 8) | (cmp3 << 16);
+
+	fdata = pll_cmp_to_fdata(pll_cmp + 1, parent_rate);
+
+	do_div(fdata, 10);
+
+	return fdata;
+}
+
+static void hdmi_8996_pll_unprepare(struct clk_hw *hw)
+{
+}
+
+static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
+{
+	struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+	u32 status;
+	int pll_locked;
+
+	status = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS);
+	pll_locked = status & BIT(0);
+
+	return pll_locked;
+}
+
+static struct clk_ops hdmi_8996_pll_ops = {
+	.set_rate = hdmi_8996_pll_set_clk_rate,
+	.round_rate = hdmi_8996_pll_round_rate,
+	.recalc_rate = hdmi_8996_pll_recalc_rate,
+	.prepare = hdmi_8996_pll_prepare,
+	.unprepare = hdmi_8996_pll_unprepare,
+	.is_enabled = hdmi_8996_pll_is_enabled,
+};
+
+static const char * const hdmi_pll_parents[] = {
+	"xo",
+};
+
+static struct clk_init_data pll_init = {
+	.name = "hdmipll",
+	.ops = &hdmi_8996_pll_ops,
+	.parent_names = hdmi_pll_parents,
+	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+};
+
+int msm_hdmi_pll_8996_init(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct hdmi_pll_8996 *pll;
+	struct clk *clk;
+	int i;
+
+	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return -ENOMEM;
+
+	pll->pdev = pdev;
+
+	pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
+	if (IS_ERR(pll->mmio_qserdes_com)) {
+		dev_err(dev, "failed to map pll base\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+		char name[32], label[32];
+
+		snprintf(name, sizeof(name), "hdmi_tx_l%d", i);
+		snprintf(label, sizeof(label), "HDMI_TX_L%d", i);
+
+		pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
+		if (IS_ERR(pll->mmio_qserdes_tx[i])) {
+			dev_err(dev, "failed to map pll base\n");
+			return -ENOMEM;
+		}
+	}
+	pll->clk_hw.init = &pll_init;
+
+	clk = devm_clk_register(dev, &pll->clk_hw);
+	if (IS_ERR(clk)) {
+		dev_err(dev, "failed to register pll clock\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const char * const hdmi_phy_8996_reg_names[] = {
+	"vddio",
+	"vcca",
+};
+
+static const char * const hdmi_phy_8996_clk_names[] = {
+	"mmagic_iface_clk",
+	"iface_clk",
+	"ref_clk",
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = {
+	.type = MSM_HDMI_PHY_8996,
+	.reg_names = hdmi_phy_8996_reg_names,
+	.num_regs = ARRAY_SIZE(hdmi_phy_8996_reg_names),
+	.clk_names = hdmi_phy_8996_clk_names,
+	.num_clks = ARRAY_SIZE(hdmi_phy_8996_clk_names),
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index cb01421..a68eea4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -17,166 +17,122 @@
 
 #include "hdmi.h"
 
-struct hdmi_phy_8x60 {
-	struct hdmi_phy base;
-	struct hdmi *hdmi;
-};
-#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
-
-static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
-	kfree(phy_8x60);
-}
-
 static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
-	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
-	struct hdmi *hdmi = phy_8x60->hdmi;
-
 	/* De-serializer delay D/C for non-lbk mode: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
-			HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG0,
+		       HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
 
 	if (pixclock == 27000000) {
 		/* video_format == HDMI_VFRMT_720x480p60_16_9 */
-		hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
-				HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
-				HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
+		hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1,
+			       HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+			       HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
 	} else {
-		hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
-				HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
-				HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
+		hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1,
+			       HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+			       HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
 	}
 
 	/* No matter what, start from the power down mode: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
-			HDMI_8x60_PHY_REG2_PD_PWRGEN |
-			HDMI_8x60_PHY_REG2_PD_PLL |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
-			HDMI_8x60_PHY_REG2_PD_DESER);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+		       HDMI_8x60_PHY_REG2_PD_PWRGEN |
+		       HDMI_8x60_PHY_REG2_PD_PLL |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+		       HDMI_8x60_PHY_REG2_PD_DESER);
 
 	/* Turn PowerGen on: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
-			HDMI_8x60_PHY_REG2_PD_PLL |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
-			HDMI_8x60_PHY_REG2_PD_DESER);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+		       HDMI_8x60_PHY_REG2_PD_PLL |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+		       HDMI_8x60_PHY_REG2_PD_DESER);
 
 	/* Turn PLL power on: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
-			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
-			HDMI_8x60_PHY_REG2_PD_DESER);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+		       HDMI_8x60_PHY_REG2_PD_DESER);
 
 	/* Write to HIGH after PLL power down de-assert: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
-			HDMI_8x60_PHY_REG3_PLL_ENABLE);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3,
+		       HDMI_8x60_PHY_REG3_PLL_ENABLE);
 
 	/* ASIC power on; PHY REG9 = 0 */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0);
 
 	/* Enable PLL lock detect, PLL lock det will go high after lock
 	 * Enable the re-time logic
 	 */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
-			HDMI_8x60_PHY_REG12_RETIMING_EN |
-			HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12,
+		       HDMI_8x60_PHY_REG12_RETIMING_EN |
+		       HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
 
 	/* Drivers are on: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
-			HDMI_8x60_PHY_REG2_PD_DESER);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+		       HDMI_8x60_PHY_REG2_PD_DESER);
 
 	/* If the RX detector is needed: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
-			HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
-			HDMI_8x60_PHY_REG2_PD_DESER);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+		       HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+		       HDMI_8x60_PHY_REG2_PD_DESER);
 
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG4, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG5, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG6, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG7, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG8, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG10, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG11, 0);
 
 	/* If we want to use lock enable based on counting: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
-			HDMI_8x60_PHY_REG12_RETIMING_EN |
-			HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
-			HDMI_8x60_PHY_REG12_FORCE_LOCK);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12,
+		       HDMI_8x60_PHY_REG12_RETIMING_EN |
+		       HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
+		       HDMI_8x60_PHY_REG12_FORCE_LOCK);
 }
 
 static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
 {
-	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
-	struct hdmi *hdmi = phy_8x60->hdmi;
-
 	/* Assert RESET PHY from controller */
-	hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-			HDMI_PHY_CTRL_SW_RESET);
+	hdmi_phy_write(phy, REG_HDMI_PHY_CTRL,
+		       HDMI_PHY_CTRL_SW_RESET);
 	udelay(10);
 	/* De-assert RESET PHY from controller */
-	hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
+	hdmi_phy_write(phy, REG_HDMI_PHY_CTRL, 0);
 	/* Turn off Driver */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
-			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
-			HDMI_8x60_PHY_REG2_PD_DESER);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+		       HDMI_8x60_PHY_REG2_PD_DESER);
 	udelay(10);
 	/* Disable PLL */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3, 0);
 	/* Power down PHY, but keep RX-sense: */
-	hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
-			HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
-			HDMI_8x60_PHY_REG2_PD_PWRGEN |
-			HDMI_8x60_PHY_REG2_PD_PLL |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
-			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
-			HDMI_8x60_PHY_REG2_PD_DESER);
+	hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+		       HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+		       HDMI_8x60_PHY_REG2_PD_PWRGEN |
+		       HDMI_8x60_PHY_REG2_PD_PLL |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+		       HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+		       HDMI_8x60_PHY_REG2_PD_DESER);
 }
 
-static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
-		.destroy = hdmi_phy_8x60_destroy,
-		.powerup = hdmi_phy_8x60_powerup,
-		.powerdown = hdmi_phy_8x60_powerdown,
+const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg = {
+	.type = MSM_HDMI_PHY_8x60,
+	.powerup = hdmi_phy_8x60_powerup,
+	.powerdown = hdmi_phy_8x60_powerdown,
 };
-
-struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
-{
-	struct hdmi_phy_8x60 *phy_8x60;
-	struct hdmi_phy *phy = NULL;
-	int ret;
-
-	phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
-	if (!phy_8x60) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	phy = &phy_8x60->base;
-
-	phy->funcs = &hdmi_phy_8x60_funcs;
-
-	phy_8x60->hdmi = hdmi;
-
-	return phy;
-
-fail:
-	if (phy)
-		hdmi_phy_8x60_destroy(phy);
-	return ERR_PTR(ret);
-}
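The whole 8x60 conversion above hangs off a single accessor that this hunk
never shows. Given that the other PHY files now map their own registers,
hdmi_phy_write() is presumably no more than a thin MMIO wrapper along these
lines (an assumption; the real definition lives in the shared hdmi code):

	/* Assumed shape of the shared accessor: */
	static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 reg, u32 data)
	{
		msm_writel(data, phy->mmio + reg);
	}

Once register access no longer needs the parent struct hdmi, the
container_of() wrapper, the kzalloc()/kfree() pair and the init function all
become dead weight, which is exactly what the deletions above remove.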
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 56ab891..c4a61e5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -17,84 +17,40 @@
 
 #include "hdmi.h"
 
-struct hdmi_phy_8x74 {
-	struct hdmi_phy base;
-	void __iomem *mmio;
-};
-#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
-
-
-static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data)
-{
-	msm_writel(data, phy->mmio + reg);
-}
-
-//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg)
-//{
-//	return msm_readl(phy->mmio + reg);
-//}
-
-static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
-	kfree(phy_8x74);
-}
-
 static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
-	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
-
-	phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0,   0x1b);
-	phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1,   0xf2);
-	phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0,  0x0);
-	phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0);
-	phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0);
-	phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0);
-	phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0);
-	phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1,   0x20);
+	hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG0,   0x1b);
+	hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG1,   0xf2);
+	hdmi_phy_write(phy, REG_HDMI_8x74_BIST_CFG0,  0x0);
+	hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN0, 0x0);
+	hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN1, 0x0);
+	hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN2, 0x0);
+	hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN3, 0x0);
+	hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL1,   0x20);
 }
 
 static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
 {
-	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
-	phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f);
+	hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL0, 0x7f);
 }
 
-static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
-		.destroy = hdmi_phy_8x74_destroy,
-		.powerup = hdmi_phy_8x74_powerup,
-		.powerdown = hdmi_phy_8x74_powerdown,
+static const char * const hdmi_phy_8x74_reg_names[] = {
+	"core-vdda",
+	"vddio",
 };
 
-struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
-{
-	struct hdmi_phy_8x74 *phy_8x74;
-	struct hdmi_phy *phy = NULL;
-	int ret;
+static const char * const hdmi_phy_8x74_clk_names[] = {
+	"iface_clk",
+	"alt_iface_clk"
+};
 
-	phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL);
-	if (!phy_8x74) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	phy = &phy_8x74->base;
-
-	phy->funcs = &hdmi_phy_8x74_funcs;
-
-	/* for 8x74, the phy mmio is mapped separately: */
-	phy_8x74->mmio = msm_ioremap(hdmi->pdev,
-			"phy_physical", "HDMI_8x74");
-	if (IS_ERR(phy_8x74->mmio)) {
-		ret = PTR_ERR(phy_8x74->mmio);
-		goto fail;
-	}
-
-	return phy;
-
-fail:
-	if (phy)
-		hdmi_phy_8x74_destroy(phy);
-	return ERR_PTR(ret);
-}
+const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = {
+	.type = MSM_HDMI_PHY_8x74,
+	.powerup = hdmi_phy_8x74_powerup,
+	.powerdown = hdmi_phy_8x74_powerdown,
+	.reg_names = hdmi_phy_8x74_reg_names,
+	.num_regs = ARRAY_SIZE(hdmi_phy_8x74_reg_names),
+	.clk_names = hdmi_phy_8x74_clk_names,
+	.num_clks = ARRAY_SIZE(hdmi_phy_8x74_clk_names),
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
new file mode 100644
index 0000000..92da69a
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include "hdmi.h"
+
+struct hdmi_pll_8960 {
+	struct platform_device *pdev;
+	struct clk_hw clk_hw;
+	void __iomem *mmio;
+
+	unsigned long pixclk;
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8960, clk_hw)
+
+/*
+ * HDMI PLL:
+ *
+ * To get the parent clock set up properly, we need to plug the HDMI PLL
+ * configuration into the common clock framework.
+ */
+
+struct pll_rate {
+	unsigned long rate;
+	int num_reg;
+	struct {
+		u32 val;
+		u32 reg;
+	} conf[32];
+};
+
+/* NOTE: keep sorted highest freq to lowest: */
+static const struct pll_rate freqtbl[] = {
+	{ 154000000, 14, {
+		{ 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+			}
+	},
+	/* 1080p60/1080p50 case */
+	{ 148500000, 27, {
+		{ 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+		{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG  },
+		{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
+		{ 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+		{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3      },
+		{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0  },
+		{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1  },
+		{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2  },
+		{ 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7   },
+			}
+	},
+	{ 108000000, 13, {
+		{ 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+			}
+	},
+	/* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
+	{ 74250000, 8, {
+		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
+		{ 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+			}
+	},
+	{ 74176000, 14, {
+		{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+			}
+	},
+	{ 65000000, 14, {
+		{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+			}
+	},
+	/* 480p60/480i60 */
+	{ 27030000, 18, {
+		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
+		{ 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+		{ 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7   },
+			}
+	},
+	/* 576p50/576i50 */
+	{ 27000000, 27, {
+		{ 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+		{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG  },
+		{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
+		{ 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+		{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3      },
+		{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0  },
+		{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1  },
+		{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2  },
+		{ 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7   },
+			}
+	},
+	/* 640x480p60 */
+	{ 25200000, 27, {
+		{ 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+		{ 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG  },
+		{ 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+		{ 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B       },
+		{ 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+		{ 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2      },
+		{ 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3      },
+		{ 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0  },
+		{ 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1  },
+		{ 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2  },
+		{ 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7   },
+			}
+	},
+};
+
+static inline void pll_write(struct hdmi_pll_8960 *pll, u32 reg, u32 data)
+{
+	msm_writel(data, pll->mmio + reg);
+}
+
+static inline u32 pll_read(struct hdmi_pll_8960 *pll, u32 reg)
+{
+	return msm_readl(pll->mmio + reg);
+}
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8960 *pll)
+{
+	return platform_get_drvdata(pll->pdev);
+}
+
+static int hdmi_pll_enable(struct clk_hw *hw)
+{
+	struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+	struct hdmi_phy *phy = pll_get_phy(pll);
+	int timeout_count, pll_lock_retry = 10;
+	unsigned int val;
+
+	DBG("");
+
+	/* Assert PLL S/W reset */
+	pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+	pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
+	pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
+
+	/*
+	 * Wait for a short time before de-asserting to allow the
+	 * hardware to complete its job. This much delay should be
+	 * enough for the hardware to latch the assert and de-assert.
+	 */
+	udelay(10);
+
+	/* De-assert PLL S/W reset */
+	pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+	val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+	val |= HDMI_8960_PHY_REG12_SW_RESET;
+	/* Assert PHY S/W reset */
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+	val &= ~HDMI_8960_PHY_REG12_SW_RESET;
+	/*
+	 * Wait for a short time before de-asserting to allow the hardware to
+	 * complete its job. This much delay should be enough for the
+	 * hardware to latch the assert and de-assert.
+	 */
+	udelay(10);
+	/* De-assert PHY S/W reset */
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2,  0x3f);
+
+	val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+	val |= HDMI_8960_PHY_REG12_PWRDN_B;
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+	/* Wait 10 us for the PHY's global power to come up */
+	mb();
+	udelay(10);
+
+	val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+	val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
+	val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
+	pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x80);
+
+	timeout_count = 1000;
+	while (--pll_lock_retry > 0) {
+		/* are we there yet? */
+		val = pll_read(pll, REG_HDMI_8960_PHY_PLL_STATUS0);
+		if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
+			break;
+
+		udelay(1);
+
+		if (--timeout_count > 0)
+			continue;
+
+		/*
+		 * The PLL still has not locked; do a software reset
+		 * and try again, asserting the PLL S/W reset first.
+		 */
+		pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+		udelay(10);
+		pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+		/*
+		 * Give the PLL calibration a short while to settle
+		 * before checking whether the PLL has locked.
+		 */
+		udelay(350);
+
+		timeout_count = 1000;
+	}
+
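+	/*
+	 * NOTE: this returns success even when the PLL never locks;
+	 * exhausting pll_lock_retry is not reported to the caller.
+	 */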
+	return 0;
+}
+
+static void hdmi_pll_disable(struct clk_hw *hw)
+{
+	struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+	struct hdmi_phy *phy = pll_get_phy(pll);
+	unsigned int val;
+
+	DBG("");
+
+	val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+	val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+
+	val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+	val |= HDMI_8960_PHY_REG12_SW_RESET;
+	val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+	pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+	/* Make sure HDMI PHY/PLL are powered down */
+	mb();
+}
+
+static const struct pll_rate *find_rate(unsigned long rate)
+{
+	int i;
+
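+	/*
+	 * freqtbl[] is sorted from highest to lowest rate: return the
+	 * smallest entry that still satisfies the requested rate, or
+	 * the boundary entry when the request falls outside the table.
+	 */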
+	for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
+		if (rate > freqtbl[i].rate)
+			return &freqtbl[i - 1];
+
+	return &freqtbl[i - 1];
+}
+
+static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
+{
+	struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+
+	return pll->pixclk;
+}
+
+static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	const struct pll_rate *pll_rate = find_rate(rate);
+
+	return pll_rate->rate;
+}
+
+static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long parent_rate)
+{
+	struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+	const struct pll_rate *pll_rate = find_rate(rate);
+	int i;
+
+	DBG("rate=%lu", rate);
+
+	for (i = 0; i < pll_rate->num_reg; i++)
+		pll_write(pll, pll_rate->conf[i].reg, pll_rate->conf[i].val);
+
+	pll->pixclk = rate;
+
+	return 0;
+}
+
+static const struct clk_ops hdmi_pll_ops = {
+	.enable = hdmi_pll_enable,
+	.disable = hdmi_pll_disable,
+	.recalc_rate = hdmi_pll_recalc_rate,
+	.round_rate = hdmi_pll_round_rate,
+	.set_rate = hdmi_pll_set_rate,
+};
+
+static const char * const hdmi_pll_parents[] = {
+	"pxo",
+};
+
+static struct clk_init_data pll_init = {
+	.name = "hdmi_pll",
+	.ops = &hdmi_pll_ops,
+	.parent_names = hdmi_pll_parents,
+	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+};
+
+int msm_hdmi_pll_8960_init(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct hdmi_pll_8960 *pll;
+	struct clk *clk;
+	int i;
+
+	/* sanity check: */
+	for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
+		if (WARN_ON(freqtbl[i].rate < freqtbl[i + 1].rate))
+			return -EINVAL;
+
+	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+	if (!pll)
+		return -ENOMEM;
+
+	pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
+	if (IS_ERR(pll->mmio)) {
+		dev_err(dev, "failed to map pll base\n");
+		return PTR_ERR(pll->mmio);
+	}
+
+	pll->pdev = pdev;
+	pll->clk_hw.init = &pll_init;
+
+	clk = devm_clk_register(dev, &pll->clk_hw);
+	if (IS_ERR(clk)) {
+		dev_err(dev, "failed to register pll clock\n");
+		return PTR_ERR(clk);
+	}
+
+	return 0;
+}
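With msm_hdmi_pll_8960_init() registering the PLL through devm_clk_register(),
rate control goes through the generic clk API instead of direct register
pokes. A hedged sketch of the consumer side (the "hdmi_pll" name comes from
pll_init above; how the encoder actually obtains the clk is outside this
hunk):

	struct clk *pll = clk_get(dev, "hdmi_pll");	/* lookup wiring assumed */

	if (!IS_ERR(pll)) {
		/* set_rate snaps to a freqtbl[] row via hdmi_pll_round_rate() */
		clk_set_rate(pll, 148500000);	/* the 1080p60/1080p50 entry */
		clk_prepare_enable(pll);	/* ends up in hdmi_pll_enable() */
	}

Note the split of work: hdmi_pll_set_rate() only latches the table's register
values, while the reset-and-poll-for-lock dance happens at enable time.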
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index dbd9cc4d..6eab7d0 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index d5d9457..6688e79 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 28df397..e233acf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -147,13 +147,6 @@
 	kfree(mdp4_crtc);
 }
 
-static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 /* statically (for now) map planes to mixer stage (z-order): */
 static const int idxs[] = {
 		[VG1]  = 1,
@@ -361,13 +354,6 @@
 	request_pending(crtc, PENDING_FLIP);
 }
 
-static int mdp4_crtc_set_property(struct drm_crtc *crtc,
-		struct drm_property *property, uint64_t val)
-{
-	// XXX
-	return -EINVAL;
-}
-
 #define CURSOR_WIDTH 64
 #define CURSOR_HEIGHT 64
 
@@ -499,7 +485,7 @@
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = mdp4_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
-	.set_property = mdp4_crtc_set_property,
+	.set_property = drm_atomic_helper_crtc_set_property,
 	.cursor_set = mdp4_crtc_cursor_set,
 	.cursor_move = mdp4_crtc_cursor_move,
 	.reset = drm_atomic_helper_crtc_reset,
@@ -508,7 +494,6 @@
 };
 
 static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
-	.mode_fixup = mdp4_crtc_mode_fixup,
 	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
 	.disable = mdp4_crtc_disable,
 	.enable = mdp4_crtc_enable,
@@ -575,13 +560,6 @@
 	return mdp4_crtc->vblank.irqmask;
 }
 
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	DBG("%s: cancel: %p", mdp4_crtc->name, file);
-	complete_flip(crtc, file);
-}
-
 /* set dma config, ie. the format the encoder wants. */
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
 {
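Every .mode_fixup implementation deleted in this series, starting with
mdp4_crtc_mode_fixup() above, did nothing but return true. The drm helper
core (as of this series) skips a NULL .mode_fixup and accepts the mode as-is,
so the fix is simply to leave the hook out of the funcs table, e.g. (members
beyond those visible in the hunk elided):

	static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
		/* no .mode_fixup: a NULL hook now means "no fixup needed" */
		.mode_set_nofb = mdp4_crtc_mode_set_nofb,
		.disable = mdp4_crtc_disable,
		.enable = mdp4_crtc_enable,
	};

The same reasoning covers the encoder-side removals in the mdp4/mdp5 and
nouveau files that follow.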
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
index 2f57e94..106f0e7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
@@ -47,13 +47,6 @@
 	.destroy = mdp4_dsi_encoder_destroy,
 };
 
-static bool mdp4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
-					const struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
 				      struct drm_display_mode *mode,
 				      struct drm_display_mode *adjusted_mode)
@@ -163,7 +156,6 @@
 }
 
 static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
-	.mode_fixup = mdp4_dsi_encoder_mode_fixup,
 	.mode_set = mdp4_dsi_encoder_mode_set,
 	.disable = mdp4_dsi_encoder_disable,
 	.enable = mdp4_dsi_encoder_enable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index a21df54..35ad78a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -94,13 +94,6 @@
 	.destroy = mdp4_dtv_encoder_destroy,
 };
 
-static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
@@ -234,7 +227,6 @@
 }
 
 static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
-	.mode_fixup = mdp4_dtv_encoder_mode_fixup,
 	.mode_set = mdp4_dtv_encoder_mode_set,
 	.enable = mdp4_dtv_encoder_enable,
 	.disable = mdp4_dtv_encoder_disable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 5a8e3d6..76e1dfb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -179,19 +179,20 @@
 	}
 }
 
-static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
-{
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-	struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
-	unsigned i;
-
-	for (i = 0; i < priv->num_crtcs; i++)
-		mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
-}
+static const char * const iommu_ports[] = {
+	"mdp_port0_cb0", "mdp_port1_cb0",
+};
 
 static void mdp4_destroy(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	struct msm_mmu *mmu = mdp4_kms->mmu;
+
+	if (mmu) {
+		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+		mmu->funcs->destroy(mmu);
+	}
+
 	if (mdp4_kms->blank_cursor_iova)
 		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
 	if (mdp4_kms->blank_cursor_bo)
@@ -213,7 +214,6 @@
 		.wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done,
 		.get_format      = mdp_get_format,
 		.round_pixclk    = mdp4_round_pixclk,
-		.preclose        = mdp4_preclose,
 		.destroy         = mdp4_destroy,
 	},
 	.set_irqmask         = mdp4_set_irqmask,
@@ -326,7 +326,7 @@
 
 		if (priv->hdmi) {
 			/* Construct bridge/connector for HDMI: */
-			ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
 			if (ret) {
 				dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
 				return ret;
@@ -457,10 +457,6 @@
 	return ret;
 }
 
-static const char *iommu_ports[] = {
-		"mdp_port0_cb0", "mdp_port1_cb0",
-};
-
 struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 {
 	struct platform_device *pdev = dev->platformdev;
@@ -565,6 +561,8 @@
 				ARRAY_SIZE(iommu_ports));
 		if (ret)
 			goto fail;
+
+		mdp4_kms->mmu = mmu;
 	} else {
 		dev_info(dev->dev, "no iommu, fallback to phys "
 				"contig buffers for scanout\n");
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index d2c96ef..b282871 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -45,6 +45,7 @@
 	struct clk *pclk;
 	struct clk *lut_clk;
 	struct clk *axi_clk;
+	struct msm_mmu *mmu;
 
 	struct mdp_irq error_handler;
 
@@ -199,7 +200,6 @@
 		enum mdp4_pipe pipe_id, bool private_plane);
 
 uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
 void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
 void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index cd63fed..bc3d8e7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -260,13 +260,6 @@
 	mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
 }
 
-static bool mdp4_lcdc_encoder_mode_fixup(struct drm_encoder *encoder,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
@@ -430,7 +423,6 @@
 }
 
 static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
-	.mode_fixup = mdp4_lcdc_encoder_mode_fixup,
 	.mode_set = mdp4_lcdc_encoder_mode_set,
 	.disable = mdp4_lcdc_encoder_disable,
 	.enable = mdp4_lcdc_encoder_enable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c37da9c..b275ce1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 1aa21db..69094cb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -188,13 +188,6 @@
 	.destroy = mdp5_cmd_encoder_destroy,
 };
 
-static bool mdp5_cmd_encoder_mode_fixup(struct drm_encoder *encoder,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
@@ -256,7 +249,6 @@
 }
 
 static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
-	.mode_fixup = mdp5_cmd_encoder_mode_fixup,
 	.mode_set = mdp5_cmd_encoder_mode_set,
 	.disable = mdp5_cmd_encoder_disable,
 	.enable = mdp5_cmd_encoder_enable,
@@ -340,4 +332,3 @@
 
 	return ERR_PTR(ret);
 }
-
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 20cee5ce..9673b95 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -185,13 +185,6 @@
 	kfree(mdp5_crtc);
 }
 
-static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 /*
  * blend_setup() - blend all the planes of a CRTC
  *
@@ -468,13 +461,6 @@
 	request_pending(crtc, PENDING_FLIP);
 }
 
-static int mdp5_crtc_set_property(struct drm_crtc *crtc,
-		struct drm_property *property, uint64_t val)
-{
-	// XXX
-	return -EINVAL;
-}
-
 static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -625,7 +611,7 @@
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = mdp5_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
-	.set_property = mdp5_crtc_set_property,
+	.set_property = drm_atomic_helper_crtc_set_property,
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -634,7 +620,6 @@
 };
 
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
-	.mode_fixup = mdp5_crtc_mode_fixup,
 	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
 	.disable = mdp5_crtc_disable,
 	.enable = mdp5_crtc_enable,
@@ -721,12 +706,6 @@
 	return mdp5_crtc->vblank.irqmask;
 }
 
-void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-	DBG("cancel: %p", file);
-	complete_flip(crtc, file);
-}
-
 void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
 		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 0d737ca..1d95f9f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -112,13 +112,6 @@
 	.destroy = mdp5_encoder_destroy,
 };
 
-static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 		struct drm_display_mode *mode,
 		struct drm_display_mode *adjusted_mode)
@@ -287,7 +280,6 @@
 }
 
 static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
-	.mode_fixup = mdp5_encoder_mode_fixup,
 	.mode_set = mdp5_encoder_mode_set,
 	.disable = mdp5_encoder_disable,
 	.enable = mdp5_encoder_enable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e115318..484b4d1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -117,16 +117,6 @@
 		return mdp5_encoder_set_split_display(encoder, slave_encoder);
 }
 
-static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
-{
-	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
-	unsigned i;
-
-	for (i = 0; i < priv->num_crtcs; i++)
-		mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
-}
-
 static void mdp5_destroy(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -164,7 +154,6 @@
 		.get_format      = mdp_get_format,
 		.round_pixclk    = mdp5_round_pixclk,
 		.set_split_display = mdp5_set_split_display,
-		.preclose        = mdp5_preclose,
 		.destroy         = mdp5_destroy,
 	},
 	.set_irqmask         = mdp5_set_irqmask,
@@ -295,7 +284,7 @@
 			break;
 		}
 
-		ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
 		break;
 	case INTF_DSI:
 	{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 00730ba..9a25898 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -211,7 +211,6 @@
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
 		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 0aec1ac..452e351 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -9,7 +9,7 @@
 
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
 - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
 - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9a30807..d52910e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -61,7 +61,7 @@
 #endif
 
 static char *vram = "16m";
-MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
 module_param(vram, charp, 0);
 
 /*
@@ -196,6 +196,11 @@
 	}
 
 	drm_kms_helper_poll_fini(dev);
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	if (fbdev && priv->fbdev)
+		msm_fbdev_free(dev);
+#endif
 	drm_mode_config_cleanup(dev);
 	drm_vblank_cleanup(dev);
 
@@ -1116,7 +1121,7 @@
 	DBG("init");
 	msm_dsi_register();
 	msm_edp_register();
-	hdmi_register();
+	msm_hdmi_register();
 	adreno_register();
 	return platform_driver_register(&msm_platform_driver);
 }
@@ -1125,7 +1130,7 @@
 {
 	DBG("fini");
 	platform_driver_unregister(&msm_platform_driver);
-	hdmi_unregister();
+	msm_hdmi_unregister();
 	adreno_unregister();
 	msm_edp_unregister();
 	msm_dsi_unregister();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c1e7bba..870dbe5 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -240,12 +240,13 @@
 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
 
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
+void msm_fbdev_free(struct drm_device *dev);
 
 struct hdmi;
-int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
+int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
 		struct drm_encoder *encoder);
-void __init hdmi_register(void);
-void __exit hdmi_unregister(void);
+void __init msm_hdmi_register(void);
+void __exit msm_hdmi_unregister(void);
 
 struct msm_edp;
 void __init msm_edp_register(void);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d95af6e..d9759bf 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -62,12 +62,8 @@
 	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
 	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
 	struct drm_gem_object *drm_obj = fbdev->bo;
-	struct drm_device *dev = helper->dev;
 	int ret = 0;
 
-	if (drm_device_is_unplugged(dev))
-		return -ENODEV;
-
 	ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
 	if (ret) {
 		pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6d7cd3f..43d2181 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -323,28 +323,27 @@
 	struct drm_msm_gem_submit *args = data;
 	struct msm_file_private *ctx = file->driver_priv;
 	struct msm_gem_submit *submit;
-	struct msm_gpu *gpu;
+	struct msm_gpu *gpu = priv->gpu;
 	unsigned i;
 	int ret;
 
+	if (!gpu)
+		return -ENXIO;
+
 	/* for now, we just have 3d pipe.. eventually this would need to
 	 * be more clever to dispatch to appropriate gpu module:
 	 */
 	if (args->pipe != MSM_PIPE_3D0)
 		return -EINVAL;
 
-	gpu = priv->gpu;
-
 	if (args->nr_cmds > MAX_CMDS)
 		return -EINVAL;
 
-	mutex_lock(&dev->struct_mutex);
-
 	submit = submit_create(dev, gpu, args->nr_bos);
-	if (!submit) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!submit)
+		return -ENOMEM;
+
+	mutex_lock(&dev->struct_mutex);
 
 	ret = submit_lookup_objects(submit, args, file);
 	if (ret)
@@ -419,8 +418,7 @@
 	args->fence = submit->fence;
 
 out:
-	if (submit)
-		submit_cleanup(submit, !!ret);
+	submit_cleanup(submit, !!ret);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
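The submit-path reshuffle above is a lock-scope fix as much as a cleanup: the
cheap precondition checks and the allocation now happen before struct_mutex
is taken, so every later failure can assume submit is valid and share one
cleanup path. Reduced to its control flow (a sketch, not the full function):

	if (!gpu)			/* fail fast, nothing held yet */
		return -ENXIO;

	submit = submit_create(dev, gpu, args->nr_bos);
	if (!submit)			/* allocate before taking the lock */
		return -ENOMEM;

	mutex_lock(&dev->struct_mutex);
	ret = submit_lookup_objects(submit, args, file);
	/* ... remaining steps elided; every failure jumps to out ... */
out:
	submit_cleanup(submit, !!ret);	/* submit is always valid here */
	mutex_unlock(&dev->struct_mutex);
	return ret;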
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7ac2f19..a7a0b6d 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -31,13 +31,15 @@
 	return 0;
 }
 
-static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
+			    int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	return iommu_attach_device(iommu->domain, mmu->dev);
 }
 
-static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
+			     int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	iommu_detach_device(iommu->domain, mmu->dev);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 7cd88d9..b8ca9a0 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -21,8 +21,8 @@
 #include <linux/iommu.h>
 
 struct msm_mmu_funcs {
-	int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
-	void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
+	int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
+	void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
 	int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
 			unsigned len, int prot);
 	int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
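The signature change above is forced by C's const rules rather than taste: an
array declared as const char * const, like the iommu_ports[] table moved in
mdp4_kms.c earlier, cannot be passed to a plain const char ** parameter
without a cast, because that parameter type would let the callee overwrite
the const pointers. Only the doubly-const spelling accepts it implicitly:

	static const char * const ports[] = {
		"mdp_port0_cb0", "mdp_port1_cb0",
	};

	/* accepts &ports[0] with no cast and no qualifier warning: */
	int attach(struct msm_mmu *mmu, const char * const *names, int cnt);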
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f04397..55ccbf0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -227,13 +227,6 @@
 	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
 }
 
-static bool
-nv_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
-		   struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void
 nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
@@ -1093,7 +1086,6 @@
 	.dpms = nv_crtc_dpms,
 	.prepare = nv_crtc_prepare,
 	.commit = nv_crtc_commit,
-	.mode_fixup = nv_crtc_mode_fixup,
 	.mode_set = nv_crtc_mode_set,
 	.mode_set_base = nv04_crtc_mode_set_base,
 	.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 85b7827..46301ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -3,19 +3,27 @@
 
 struct kepler_channel_gpfifo_a_v0 {
 	__u8  version;
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR                               0x01
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPDEC                           0x02
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPPP                            0x04
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSVLD                            0x08
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0                              0x10
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1                              0x20
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC                              0x40
-	__u8  engine;
+	__u8  pad01[5];
 	__u16 chid;
+#define NVA06F_V0_ENGINE_SW                                          0x00000001
+#define NVA06F_V0_ENGINE_GR                                          0x00000002
+#define NVA06F_V0_ENGINE_SEC                                         0x00000004
+#define NVA06F_V0_ENGINE_MSVLD                                       0x00000010
+#define NVA06F_V0_ENGINE_MSPDEC                                      0x00000020
+#define NVA06F_V0_ENGINE_MSPPP                                       0x00000040
+#define NVA06F_V0_ENGINE_MSENC                                       0x00000080
+#define NVA06F_V0_ENGINE_VIC                                         0x00000100
+#define NVA06F_V0_ENGINE_NVDEC                                       0x00000200
+#define NVA06F_V0_ENGINE_NVENC0                                      0x00000400
+#define NVA06F_V0_ENGINE_NVENC1                                      0x00000800
+#define NVA06F_V0_ENGINE_CE0                                         0x00010000
+#define NVA06F_V0_ENGINE_CE1                                         0x00020000
+#define NVA06F_V0_ENGINE_CE2                                         0x00040000
+	__u32 engines;
 	__u32 ilength;
 	__u64 ioffset;
 	__u64 vm;
 };
 
-#define KEPLER_CHANNEL_GPFIFO_A_V0_NTFY_UEVENT                             0x00
+#define NVA06F_V0_NTFY_UEVENT                                              0x00
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 4179cd6..982aad8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -37,6 +37,7 @@
 #define G82_CHANNEL_GPFIFO                            /* cl826f.h */ 0x0000826f
 #define FERMI_CHANNEL_GPFIFO                          /* cl906f.h */ 0x0000906f
 #define KEPLER_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000a06f
+#define KEPLER_CHANNEL_GPFIFO_B                       /* cla06f.h */ 0x0000a16f
 #define MAXWELL_CHANNEL_GPFIFO_A                      /* cla06f.h */ 0x0000b06f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
@@ -48,7 +49,7 @@
 #define GK104_DISP                                    /* cl5070.h */ 0x00009170
 #define GK110_DISP                                    /* cl5070.h */ 0x00009270
 #define GM107_DISP                                    /* cl5070.h */ 0x00009470
-#define GM204_DISP                                    /* cl5070.h */ 0x00009570
+#define GM200_DISP                                    /* cl5070.h */ 0x00009570
 
 #define NV31_MPEG                                                    0x00003174
 #define G82_MPEG                                                     0x00008274
@@ -84,7 +85,7 @@
 #define GK104_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000917d
 #define GK110_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000927d
 #define GM107_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000947d
-#define GM204_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000957d
+#define GM200_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000957d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* cl507e.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* cl507e.h */ 0x0000827e
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index e0ed2f4..bcb9817 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -62,6 +62,7 @@
 #define nvxx_gpio(a) nvxx_device(a)->gpio
 #define nvxx_clk(a) nvxx_device(a)->clk
 #define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_iccsense(a) nvxx_device(a)->iccsense
 #define nvxx_therm(a) nvxx_device(a)->therm
 #define nvxx_volt(a) nvxx_device(a)->volt
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 913192c..4993a86 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -22,30 +22,41 @@
 	NVKM_SUBDEV_BAR,
 	NVKM_SUBDEV_PMU,
 	NVKM_SUBDEV_VOLT,
+	NVKM_SUBDEV_ICCSENSE,
 	NVKM_SUBDEV_THERM,
 	NVKM_SUBDEV_CLK,
+	NVKM_SUBDEV_SECBOOT,
 
-	NVKM_ENGINE_DMAOBJ,
-	NVKM_ENGINE_IFB,
-	NVKM_ENGINE_FIFO,
-	NVKM_ENGINE_SW,
-	NVKM_ENGINE_GR,
-	NVKM_ENGINE_MPEG,
-	NVKM_ENGINE_ME,
-	NVKM_ENGINE_VP,
-	NVKM_ENGINE_CIPHER,
 	NVKM_ENGINE_BSP,
-	NVKM_ENGINE_MSPPP,
+
 	NVKM_ENGINE_CE0,
 	NVKM_ENGINE_CE1,
 	NVKM_ENGINE_CE2,
-	NVKM_ENGINE_VIC,
-	NVKM_ENGINE_MSENC,
+	NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2,
+
+	NVKM_ENGINE_CIPHER,
 	NVKM_ENGINE_DISP,
-	NVKM_ENGINE_PM,
-	NVKM_ENGINE_MSVLD,
-	NVKM_ENGINE_SEC,
+	NVKM_ENGINE_DMAOBJ,
+	NVKM_ENGINE_FIFO,
+	NVKM_ENGINE_GR,
+	NVKM_ENGINE_IFB,
+	NVKM_ENGINE_ME,
+	NVKM_ENGINE_MPEG,
+	NVKM_ENGINE_MSENC,
 	NVKM_ENGINE_MSPDEC,
+	NVKM_ENGINE_MSPPP,
+	NVKM_ENGINE_MSVLD,
+
+	NVKM_ENGINE_NVENC0,
+	NVKM_ENGINE_NVENC1,
+	NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1,
+
+	NVKM_ENGINE_NVDEC,
+	NVKM_ENGINE_PM,
+	NVKM_ENGINE_SEC,
+	NVKM_ENGINE_SW,
+	NVKM_ENGINE_VIC,
+	NVKM_ENGINE_VP,
 
 	NVKM_SUBDEV_NR
 };
@@ -109,6 +120,7 @@
 	struct nvkm_gpio *gpio;
 	struct nvkm_i2c *i2c;
 	struct nvkm_subdev *ibus;
+	struct nvkm_iccsense *iccsense;
 	struct nvkm_instmem *imem;
 	struct nvkm_ltc *ltc;
 	struct nvkm_mc *mc;
@@ -116,6 +128,7 @@
 	struct nvkm_subdev *mxm;
 	struct nvkm_pci *pci;
 	struct nvkm_pmu *pmu;
+	struct nvkm_secboot *secboot;
 	struct nvkm_therm *therm;
 	struct nvkm_timer *timer;
 	struct nvkm_volt *volt;
@@ -134,6 +147,8 @@
 	struct nvkm_engine *mspdec;
 	struct nvkm_engine *msppp;
 	struct nvkm_engine *msvld;
+	struct nvkm_engine *nvenc[2];
+	struct nvkm_engine *nvdec;
 	struct nvkm_pm *pm;
 	struct nvkm_engine *sec;
 	struct nvkm_sw *sw;
@@ -164,46 +179,50 @@
 struct nvkm_device_chip {
 	const char *name;
 
-	int (*bar    )(struct nvkm_device *, int idx, struct nvkm_bar **);
-	int (*bios   )(struct nvkm_device *, int idx, struct nvkm_bios **);
-	int (*bus    )(struct nvkm_device *, int idx, struct nvkm_bus **);
-	int (*clk    )(struct nvkm_device *, int idx, struct nvkm_clk **);
-	int (*devinit)(struct nvkm_device *, int idx, struct nvkm_devinit **);
-	int (*fb     )(struct nvkm_device *, int idx, struct nvkm_fb **);
-	int (*fuse   )(struct nvkm_device *, int idx, struct nvkm_fuse **);
-	int (*gpio   )(struct nvkm_device *, int idx, struct nvkm_gpio **);
-	int (*i2c    )(struct nvkm_device *, int idx, struct nvkm_i2c **);
-	int (*ibus   )(struct nvkm_device *, int idx, struct nvkm_subdev **);
-	int (*imem   )(struct nvkm_device *, int idx, struct nvkm_instmem **);
-	int (*ltc    )(struct nvkm_device *, int idx, struct nvkm_ltc **);
-	int (*mc     )(struct nvkm_device *, int idx, struct nvkm_mc **);
-	int (*mmu    )(struct nvkm_device *, int idx, struct nvkm_mmu **);
-	int (*mxm    )(struct nvkm_device *, int idx, struct nvkm_subdev **);
-	int (*pci    )(struct nvkm_device *, int idx, struct nvkm_pci **);
-	int (*pmu    )(struct nvkm_device *, int idx, struct nvkm_pmu **);
-	int (*therm  )(struct nvkm_device *, int idx, struct nvkm_therm **);
-	int (*timer  )(struct nvkm_device *, int idx, struct nvkm_timer **);
-	int (*volt   )(struct nvkm_device *, int idx, struct nvkm_volt **);
+	int (*bar     )(struct nvkm_device *, int idx, struct nvkm_bar **);
+	int (*bios    )(struct nvkm_device *, int idx, struct nvkm_bios **);
+	int (*bus     )(struct nvkm_device *, int idx, struct nvkm_bus **);
+	int (*clk     )(struct nvkm_device *, int idx, struct nvkm_clk **);
+	int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **);
+	int (*fb      )(struct nvkm_device *, int idx, struct nvkm_fb **);
+	int (*fuse    )(struct nvkm_device *, int idx, struct nvkm_fuse **);
+	int (*gpio    )(struct nvkm_device *, int idx, struct nvkm_gpio **);
+	int (*i2c     )(struct nvkm_device *, int idx, struct nvkm_i2c **);
+	int (*ibus    )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+	int (*iccsense)(struct nvkm_device *, int idx, struct nvkm_iccsense **);
+	int (*imem    )(struct nvkm_device *, int idx, struct nvkm_instmem **);
+	int (*ltc     )(struct nvkm_device *, int idx, struct nvkm_ltc **);
+	int (*mc      )(struct nvkm_device *, int idx, struct nvkm_mc **);
+	int (*mmu     )(struct nvkm_device *, int idx, struct nvkm_mmu **);
+	int (*mxm     )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+	int (*pci     )(struct nvkm_device *, int idx, struct nvkm_pci **);
+	int (*pmu     )(struct nvkm_device *, int idx, struct nvkm_pmu **);
+	int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
+	int (*therm   )(struct nvkm_device *, int idx, struct nvkm_therm **);
+	int (*timer   )(struct nvkm_device *, int idx, struct nvkm_timer **);
+	int (*volt    )(struct nvkm_device *, int idx, struct nvkm_volt **);
 
-	int (*bsp    )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*ce[3]  )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*disp   )(struct nvkm_device *, int idx, struct nvkm_disp **);
-	int (*dma    )(struct nvkm_device *, int idx, struct nvkm_dma **);
-	int (*fifo   )(struct nvkm_device *, int idx, struct nvkm_fifo **);
-	int (*gr     )(struct nvkm_device *, int idx, struct nvkm_gr **);
-	int (*ifb    )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*me     )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*mpeg   )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*msenc  )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*msppp  )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*msvld  )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*pm     )(struct nvkm_device *, int idx, struct nvkm_pm **);
-	int (*sec    )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*sw     )(struct nvkm_device *, int idx, struct nvkm_sw **);
-	int (*vic    )(struct nvkm_device *, int idx, struct nvkm_engine **);
-	int (*vp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*bsp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*ce[3]   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*cipher  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*disp    )(struct nvkm_device *, int idx, struct nvkm_disp **);
+	int (*dma     )(struct nvkm_device *, int idx, struct nvkm_dma **);
+	int (*fifo    )(struct nvkm_device *, int idx, struct nvkm_fifo **);
+	int (*gr      )(struct nvkm_device *, int idx, struct nvkm_gr **);
+	int (*ifb     )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*me      )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*mpeg    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msenc   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*mspdec  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msppp   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msvld   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*nvdec   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*pm      )(struct nvkm_device *, int idx, struct nvkm_pm **);
+	int (*sec     )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*sw      )(struct nvkm_device *, int idx, struct nvkm_sw **);
+	int (*vic     )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*vp      )(struct nvkm_device *, int idx, struct nvkm_engine **);
 };
 
 struct nvkm_device *nvkm_device_find(u64 name);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
new file mode 100644
index 0000000..a626ce3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_FIRMWARE_H__
+#define __NVKM_FIRMWARE_H__
+
+#include <core/device.h>
+
+int nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
+		      const struct firmware **fw);
+
+void nvkm_firmware_put(const struct firmware *fw);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index d4f56ea..c23da4f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -37,4 +37,8 @@
 int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
 		    struct nvkm_vma *);
 void nvkm_gpuobj_unmap(struct nvkm_vma *);
+void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
+			   u32 length);
+void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
+			     u32 length);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index e2e22cd..594d719 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -5,5 +5,6 @@
 int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gm204_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index efc74d0..d4fdce2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -31,5 +31,5 @@
 int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gm204_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 9e66449..15ddfcf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -60,8 +60,10 @@
 int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk110_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm204_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index f126e54..6515f58 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -40,7 +40,6 @@
 int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm204_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm206_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
new file mode 100644
index 0000000..748ea9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_MSENC_H__
+#define __NVKM_MSENC_H__
+#include <core/engine.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
new file mode 100644
index 0000000..30b76d1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_NVDEC_H__
+#define __NVKM_NVDEC_H__
+#include <core/engine.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
new file mode 100644
index 0000000..8a81932
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_NVENC_H__
+#define __NVKM_NVENC_H__
+#include <core/engine.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
new file mode 100644
index 0000000..2b0dc4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_VIC_H__
+#define __NVKM_VIC_H__
+#include <core/engine.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
index 6d3bedc..bb49bd5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
@@ -5,6 +5,9 @@
 	NVBIOS_EXTDEV_VT1103M		= 0x40,
 	NVBIOS_EXTDEV_PX3540		= 0x41,
 	NVBIOS_EXTDEV_VT1105M		= 0x42, /* or close enough... */
+	NVBIOS_EXTDEV_INA219		= 0x4c,
+	NVBIOS_EXTDEV_INA209		= 0x4d,
+	NVBIOS_EXTDEV_INA3221		= 0x4e,
 	NVBIOS_EXTDEV_ADT7473		= 0x70, /* can also be a LM64 */
 	NVBIOS_EXTDEV_HDCP_EEPROM	= 0x90,
 	NVBIOS_EXTDEV_NONE		= 0xff,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
new file mode 100644
index 0000000..9cb9747
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -0,0 +1,16 @@
+#ifndef __NVBIOS_ICCSENSE_H__
+#define __NVBIOS_ICCSENSE_H__
+struct pwr_rail_t {
+	u8 mode;
+	u8 extdev_id;
+	u8 resistor_mohm;
+	u8 rail;
+};
+
+struct nvbios_iccsense {
+	int nr_entry;
+	struct pwr_rail_t *rail;
+};
+
+int nvbios_iccsense_parse(struct nvkm_bios *, struct nvbios_iccsense *);
+#endif
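
Each pwr_rail_t entry ties a sense device (extdev_id) to its shunt resistor in
milliohms; the resistor is what converts a measured shunt voltage into a
current, and hence into a power figure. A worked sketch of that arithmetic
with simplified unit handling, not the driver's actual scaling code:

    /* I = V_shunt / R_shunt, P = V_bus * I; uV / mOhm = mA, mV * mA = uW */
    static int
    example_rail_power_uw(int shunt_uv, int bus_mv, u8 resistor_mohm)
    {
            /* assumes a sane, non-zero resistor value from the table */
            int current_ma = shunt_uv / resistor_mohm;
            return bus_mv * current_ma;
    }
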
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index 6b33bc0..fb54417 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -121,4 +121,5 @@
 int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
 int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
 int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gm20b_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 6c1407f..193626c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -27,5 +27,5 @@
 int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gm204_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 6b6224d..a63c5ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -89,7 +89,7 @@
 int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gm204_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 
 static inline int
 nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
@@ -108,6 +108,22 @@
 }
 
 static inline int
+nv_rd16i2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
+{
+	u8 val[2];
+	struct i2c_msg msgs[] = {
+		{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = addr, .flags = I2C_M_RD, .len = 2, .buf = val },
+	};
+
+	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
+	if (ret != 2)
+		return -EIO;
+
+	return val[0] << 8 | val[1];
+}
+
+static inline int
 nvkm_wri2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u8 val)
 {
 	u8 buf[2] = { reg, val };
@@ -122,6 +138,21 @@
 	return 0;
 }
 
+static inline int
+nv_wr16i2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u16 val)
+{
+	u8 buf[3] = { reg, val >> 8, val & 0xff};
+	struct i2c_msg msgs[] = {
+		{ .addr = addr, .flags = 0, .len = 3, .buf = buf },
+	};
+
+	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
+	if (ret != 1)
+		return -EIO;
+
+	return 0;
+}
+
 static inline bool
 nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
 {
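
The new nv_rd16i2cr()/nv_wr16i2cr() helpers move 16-bit big-endian register
values, which is what the INA209/INA219/INA3221 sense devices added above
speak. A usage sketch; the 0x40 slave address, register numbers and config
value are illustrative assumptions:

    static int
    example_poke_ina219(struct i2c_adapter *adap)
    {
            /* returns the 16-bit value, or -EIO on a failed transfer */
            int shunt = nv_rd16i2cr(adap, 0x40, 0x01);
            if (shunt < 0)
                    return shunt;

            /* 16-bit writes go out high byte first the same way */
            return nv_wr16i2cr(adap, 0x40, 0x00, 0x399f);
    }
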
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index ea23e24..c4ecf25 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -6,5 +6,5 @@
 int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
 int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
 int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gm204_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
new file mode 100644
index 0000000..530c621
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_ICCSENSE_H__
+#define __NVKM_ICCSENSE_H__
+
+#include <core/subdev.h>
+
+struct nvkm_iccsense_rail;
+struct nvkm_iccsense {
+	struct nvkm_subdev subdev;
+	u8 rail_count;
+	bool data_valid;
+	struct nvkm_iccsense_rail *rails;
+};
+
+int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
+int nvkm_iccsense_read(struct nvkm_iccsense *iccsense, u8 idx);
+int nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 0ffa2ec..c6b90b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -37,5 +37,5 @@
 int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gm204_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
new file mode 100644
index 0000000..c6edd95
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECURE_BOOT_H__
+#define __NVKM_SECURE_BOOT_H__
+
+#include <core/subdev.h>
+
+enum nvkm_secboot_falcon {
+	NVKM_SECBOOT_FALCON_PMU	= 0,
+	NVKM_SECBOOT_FALCON_RESERVED = 1,
+	NVKM_SECBOOT_FALCON_FECS = 2,
+	NVKM_SECBOOT_FALCON_GPCCS = 3,
+	NVKM_SECBOOT_FALCON_END = 4,
+	NVKM_SECBOOT_FALCON_INVALID = 0xffffffff,
+};
+
+/**
+ * @base:		base IO address of the falcon performing secure boot
+ * @irq_mask:		IRQ mask of the falcon performing secure boot
+ * @enable_mask:	enable mask of the falcon performing secure boot
+*/
+struct nvkm_secboot {
+	const struct nvkm_secboot_func *func;
+	struct nvkm_subdev subdev;
+
+	u32 base;
+	u32 irq_mask;
+	u32 enable_mask;
+};
+#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
+
+bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+int nvkm_secboot_reset(struct nvkm_secboot *, u32 falcon);
+int nvkm_secboot_start(struct nvkm_secboot *, u32 falcon);
+
+int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+
+#endif
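
The interface is deliberately small: an engine asks whether its falcon is
secure-managed and, if so, goes through the secboot subdev instead of poking
the falcon directly. A sketch of the expected calling pattern, inferred from
the interface above rather than taken from this series:

    static int
    example_start_fecs(struct nvkm_device *device)
    {
            struct nvkm_secboot *sb = device->secboot;

            if (sb && nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
                    return nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);

            return 0; /* not secure-managed; legacy reset path applies */
    }
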
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index b458d04..feff55c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -20,4 +20,5 @@
 int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 50f52ff..a59e524 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -263,13 +263,23 @@
 	/* hack to allow channel engine type specification on kepler */
 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 		if (init->fb_ctxdma_handle != ~0)
-			init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
-		else
-			init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+			init->fb_ctxdma_handle = NVA06F_V0_ENGINE_GR;
+		else {
+			init->fb_ctxdma_handle = 0;
+#define _(A,B) if (init->tt_ctxdma_handle & (A)) init->fb_ctxdma_handle |= (B)
+			_(0x01, NVA06F_V0_ENGINE_GR);
+			_(0x02, NVA06F_V0_ENGINE_MSPDEC);
+			_(0x04, NVA06F_V0_ENGINE_MSPPP);
+			_(0x08, NVA06F_V0_ENGINE_MSVLD);
+			_(0x10, NVA06F_V0_ENGINE_CE0);
+			_(0x20, NVA06F_V0_ENGINE_CE1);
+			_(0x40, NVA06F_V0_ENGINE_MSENC);
+#undef _
+		}
 
 		/* allow flips to be executed if this is a graphics channel */
 		init->tt_ctxdma_handle = 0;
-		if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR)
+		if (init->fb_ctxdma_handle == NVA06F_V0_ENGINE_GR)
 			init->tt_ctxdma_handle = 1;
 	}
 
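
Seen from userspace, passing fb_ctxdma_handle == ~0 selects the engine-mask
path, and the low bits of tt_ctxdma_handle are translated per the macro table
above. An illustrative request; the field names are from struct
drm_nouveau_channel_alloc, while the bit values simply mirror that table and
are not separately documented ABI:

    struct drm_nouveau_channel_alloc req = {
            .fb_ctxdma_handle = ~0,          /* take the engine-mask path */
            .tt_ctxdma_handle = 0x08 | 0x10, /* MSVLD | CE0 */
    };
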
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d5e6938..cdf5227 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -314,7 +314,7 @@
 	if (!r)
 		return;
 
-	vga_switcheroo_register_handler(&nouveau_dsm_handler);
+	vga_switcheroo_register_handler(&nouveau_dsm_handler, 0);
 }
 
 /* Must be called for Optimus models before the card can be turned off */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e3acc35..2cdaea5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1502,7 +1502,7 @@
 	}
 #endif
 
-#ifdef CONFIG_SWIOTLB
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
 		return ttm_dma_populate((void *)ttm, dev->dev);
 	}
@@ -1570,7 +1570,7 @@
 	}
 #endif
 
-#ifdef CONFIG_SWIOTLB
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
 		ttm_dma_unpopulate((void *)ttm, dev->dev);
 		return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 3f804a8..879655c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -192,6 +192,7 @@
 		    u32 engine, struct nouveau_channel **pchan)
 {
 	static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+					KEPLER_CHANNEL_GPFIFO_B,
 					KEPLER_CHANNEL_GPFIFO_A,
 					FERMI_CHANNEL_GPFIFO,
 					G82_CHANNEL_GPFIFO,
@@ -217,7 +218,7 @@
 	do {
 		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
 			args.kepler.version = 0;
-			args.kepler.engine  = engine;
+			args.kepler.engines = engine;
 			args.kepler.ilength = 0x02000;
 			args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
 			args.kepler.vm = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fcebfae..ae96ebc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -27,6 +27,7 @@
 #include <acpi/button.h>
 
 #include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
@@ -153,6 +154,17 @@
 			if (ret == 0)
 				break;
 		} else
+		if ((vga_switcheroo_handler_flags() &
+		     VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
+		    nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
+		    nv_encoder->i2c) {
+			int ret;
+			vga_switcheroo_lock_ddc(dev->pdev);
+			ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
+			vga_switcheroo_unlock_ddc(dev->pdev);
+			if (ret)
+				break;
+		} else
 		if (nv_encoder->i2c) {
 			if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
 				break;
@@ -265,7 +277,14 @@
 
 	nv_encoder = nouveau_connector_ddc_detect(connector);
 	if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
-		nv_connector->edid = drm_get_edid(connector, i2c);
+		if ((vga_switcheroo_handler_flags() &
+		     VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
+		    nv_connector->type == DCB_CONNECTOR_LVDS)
+			nv_connector->edid = drm_get_edid_switcheroo(connector,
+								     i2c);
+		else
+			nv_connector->edid = drm_get_edid(connector, i2c);
+
 		drm_mode_connector_update_edid_property(connector,
 							nv_connector->edid);
 		if (!nv_connector->edid) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 20935eb..7ce7fa5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -495,7 +495,7 @@
 
 	if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
 		static const u16 oclass[] = {
-			GM204_DISP,
+			GM200_DISP,
 			GM107_DISP,
 			GK110_DISP,
 			GK104_DISP,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2f2f252..d06877d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,11 +22,13 @@
  * Authors: Ben Skeggs
  */
 
+#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 
 #include "drmP.h"
@@ -196,6 +198,7 @@
 			break;
 		case FERMI_CHANNEL_GPFIFO:
 		case KEPLER_CHANNEL_GPFIFO_A:
+		case KEPLER_CHANNEL_GPFIFO_B:
 		case MAXWELL_CHANNEL_GPFIFO_A:
 			ret = nvc0_fence_create(drm);
 			break;
@@ -213,13 +216,13 @@
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
 		ret = nouveau_channel_new(drm, &drm->device,
-					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
-					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
+					  NVA06F_V0_ENGINE_CE0 |
+					  NVA06F_V0_ENGINE_CE1,
 					  0, &drm->cechan);
 		if (ret)
 			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
-		arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
+		arg0 = NVA06F_V0_ENGINE_GR;
 		arg1 = 1;
 	} else
 	if (device->info.chipset >= 0xa3 &&
@@ -312,6 +315,15 @@
 	bool boot = false;
 	int ret;
 
+	/*
+	 * apple-gmux is needed on dual GPU MacBook Pro
+	 * to probe the panel if we're the inactive GPU.
+	 */
+	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+	    apple_gmux_present() && pdev != vga_default_device() &&
+	    !vga_switcheroo_handler_flags())
+		return -EPROBE_DEFER;
+
 	/* remove conflicting drivers (vesafb, efifb etc) */
 	aper = alloc_apertures(3);
 	if (!aper)
@@ -364,7 +376,7 @@
 	struct pci_dev *pdev = drm->dev->pdev;
 
 	if (!pdev) {
-		DRM_INFO("not a PCI device; no HDMI\n");
+		NV_DEBUG(drm, "not a PCI device; no HDMI\n");
 		drm->hdmi_device = NULL;
 		return;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 8e13467..67edd2f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -34,6 +34,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_hwmon.h"
 
+#include <nvkm/subdev/iccsense.h>
 #include <nvkm/subdev/volt.h>
 
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
@@ -543,6 +544,24 @@
 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO,
 			  nouveau_hwmon_get_in0_label, NULL, 0);
 
+static ssize_t
+nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a,
+			       char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
+	int result = nvkm_iccsense_read_all(iccsense);
+
+	if (result < 0)
+		return result;
+
+	return sprintf(buf, "%i\n", result);
+}
+
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO,
+			  nouveau_hwmon_get_power1_input, NULL, 0);
+
 static struct attribute *hwmon_default_attributes[] = {
 	&sensor_dev_attr_name.dev_attr.attr,
 	&sensor_dev_attr_update_rate.dev_attr.attr,
@@ -579,6 +598,11 @@
 	NULL
 };
 
+static struct attribute *hwmon_power_attributes[] = {
+	&sensor_dev_attr_power1_input.dev_attr.attr,
+	NULL
+};
+
 static const struct attribute_group hwmon_default_attrgroup = {
 	.attrs = hwmon_default_attributes,
 };
@@ -594,6 +618,9 @@
 static const struct attribute_group hwmon_in0_attrgroup = {
 	.attrs = hwmon_in0_attributes,
 };
+static const struct attribute_group hwmon_power_attrgroup = {
+	.attrs = hwmon_power_attributes,
+};
 #endif
 
 int
@@ -603,6 +630,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_therm *therm = nvxx_therm(&drm->device);
 	struct nvkm_volt *volt = nvxx_volt(&drm->device);
+	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
 	struct nouveau_hwmon *hwmon;
 	struct device *hwmon_dev;
 	int ret = 0;
@@ -612,10 +640,7 @@
 		return -ENOMEM;
 	hwmon->dev = dev;
 
-	if (!therm || !therm->attr_get || !therm->attr_set)
-		return -ENODEV;
-
-	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+	hwmon_dev = hwmon_device_register(dev->dev);
 	if (IS_ERR(hwmon_dev)) {
 		ret = PTR_ERR(hwmon_dev);
 		NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
@@ -628,26 +653,28 @@
 	if (ret)
 		goto error;
 
-	/* if the card has a working thermal sensor */
-	if (nvkm_therm_temp_get(therm) >= 0) {
-		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
-		if (ret)
-			goto error;
-	}
+	if (therm && therm->attr_get && therm->attr_set) {
+		/* if the card has a working thermal sensor */
+		if (nvkm_therm_temp_get(therm) >= 0) {
+			ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
+			if (ret)
+				goto error;
+		}
 
-	/* if the card has a pwm fan */
-	/*XXX: incorrect, need better detection for this, some boards have
-	 *     the gpio entries for pwm fan control even when there's no
-	 *     actual fan connected to it... therm table? */
-	if (therm->fan_get && therm->fan_get(therm) >= 0) {
-		ret = sysfs_create_group(&hwmon_dev->kobj,
-					 &hwmon_pwm_fan_attrgroup);
-		if (ret)
-			goto error;
+		/* if the card has a pwm fan */
+		/*XXX: incorrect, need better detection for this, some boards have
+		 *     the gpio entries for pwm fan control even when there's no
+		 *     actual fan connected to it... therm table? */
+		if (therm->fan_get && therm->fan_get(therm) >= 0) {
+			ret = sysfs_create_group(&hwmon_dev->kobj,
+						 &hwmon_pwm_fan_attrgroup);
+			if (ret)
+				goto error;
+		}
 	}
 
 	/* if the card can read the fan rpm */
-	if (nvkm_therm_fan_sense(therm) >= 0) {
+	if (therm && nvkm_therm_fan_sense(therm) >= 0) {
 		ret = sysfs_create_group(&hwmon_dev->kobj,
 					 &hwmon_fan_rpm_attrgroup);
 		if (ret)
@@ -662,6 +689,13 @@
 			goto error;
 	}
 
+	if (iccsense && iccsense->data_valid && iccsense->rail_count) {
+		ret = sysfs_create_group(&hwmon_dev->kobj,
+					 &hwmon_power_attrgroup);
+		if (ret)
+			goto error;
+	}
+
 	hwmon->hwmon = hwmon_dev;
 
 	return 0;
@@ -688,6 +722,7 @@
 		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
 		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
 		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_in0_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_attrgroup);
 
 		hwmon_device_unregister(hwmon->hwmon);
 	}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ea39216..a43445c 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -297,7 +297,7 @@
 		.pushbuf = 0xb0007d00,
 	};
 	static const s32 oclass[] = {
-		GM204_DISP_CORE_CHANNEL_DMA,
+		GM200_DISP_CORE_CHANNEL_DMA,
 		GM107_DISP_CORE_CHANNEL_DMA,
 		GK110_DISP_CORE_CHANNEL_DMA,
 		GK104_DISP_CORE_CHANNEL_DMA,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index 7f66963..86a31a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -2,6 +2,7 @@
 nvkm-y += nvkm/core/engine.o
 nvkm-y += nvkm/core/enum.o
 nvkm-y += nvkm/core/event.o
+nvkm-y += nvkm/core/firmware.o
 nvkm-y += nvkm/core/gpuobj.o
 nvkm-y += nvkm/core/ioctl.o
 nvkm-y += nvkm/core/memory.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
new file mode 100644
index 0000000..34ecd4a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <core/device.h>
+
+/**
+ * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
+ * @device	device that will use that firmware
+ * @fwname	name of firmware file to load
+ * @fw		firmware structure to load to
+ *
+ * Use this function to load firmware files in the form nvidia/chip/fwname.bin.
+ * Firmware files released by NVIDIA will always follow this format.
+ */
+int
+nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
+		  const struct firmware **fw)
+{
+	char f[64];
+	char cname[16];
+	int i;
+
+	/* Convert device name to lowercase */
+	strncpy(cname, device->chip->name, sizeof(cname));
+	cname[sizeof(cname) - 1] = '\0';
+	i = strlen(cname);
+	while (i) {
+		--i;
+		cname[i] = tolower(cname[i]);
+	}
+
+	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+	return request_firmware(fw, f, device->dev);
+}
+
+/**
+ * nvkm_firmware_put - release firmware loaded with nvkm_firmware_get
+ */
+void
+nvkm_firmware_put(const struct firmware *fw)
+{
+	release_firmware(fw);
+}
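
A usage sketch, assuming a struct nvkm_device *device in scope; the firmware
name is illustrative, but per the path scheme documented above it would
resolve to nvidia/gm206/gr/fecs_inst.bin on a GM206 board:

    const struct firmware *fw;
    int ret = nvkm_firmware_get(device, "gr/fecs_inst", &fw);
    if (ret == 0) {
            /* fw->data / fw->size hold the blob until released */
            nvkm_firmware_put(fw);
    }
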
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index c3a790e..a7bd227 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -253,3 +253,23 @@
 	(*pgpuobj)->size = nvkm_memory_size(memory);
 	return 0;
 }
+
+void
+nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
+		      u32 length)
+{
+	int i;
+
+	for (i = 0; i < length; i += 4)
+		nvkm_wo32(dst, dstoffset + i, *(u32 *)(src + i));
+}
+
+void
+nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
+			u32 length)
+{
+	int i;
+
+	for (i = 0; i < length; i += 4)
+		((u32 *)dst)[i / 4] = nvkm_ro32(src, srcoffset + i);
+}
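
Both helpers are thin loops over nvkm_wo32()/nvkm_ro32(), so length is
consumed in 4-byte words, and callers presumably bracket them with the usual
nvkm_kmap()/nvkm_done() pair, as those accessors require elsewhere in nvkm.
A sketch, with gpuobj, blob and blob_size assumed in scope:

    nvkm_kmap(gpuobj);
    nvkm_gpuobj_memcpy_to(gpuobj, 0x0000, blob, blob_size); /* blob_size % 4 == 0 */
    nvkm_done(gpuobj);
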
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index 3216e15..89da472 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -131,7 +131,7 @@
 	struct nvkm_ramht *ramht = *pramht;
 	if (ramht) {
 		nvkm_gpuobj_del(&ramht->gpuobj);
-		kfree(*pramht);
+		vfree(*pramht);
 		*pramht = NULL;
 	}
 }
@@ -143,8 +143,8 @@
 	struct nvkm_ramht *ramht;
 	int ret, i;
 
-	if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
-					sizeof(*ramht->data), GFP_KERNEL)))
+	if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
+					(size >> 3) * sizeof(*ramht->data))))
 		return -ENOMEM;
 
 	ramht->device = device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 7de9847..3bf08cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -29,47 +29,52 @@
 
 const char *
 nvkm_subdev_name[NVKM_SUBDEV_NR] = {
-	[NVKM_SUBDEV_BAR    ] = "bar",
-	[NVKM_SUBDEV_VBIOS  ] = "bios",
-	[NVKM_SUBDEV_BUS    ] = "bus",
-	[NVKM_SUBDEV_CLK    ] = "clk",
-	[NVKM_SUBDEV_DEVINIT] = "devinit",
-	[NVKM_SUBDEV_FB     ] = "fb",
-	[NVKM_SUBDEV_FUSE   ] = "fuse",
-	[NVKM_SUBDEV_GPIO   ] = "gpio",
-	[NVKM_SUBDEV_I2C    ] = "i2c",
-	[NVKM_SUBDEV_IBUS   ] = "priv",
-	[NVKM_SUBDEV_INSTMEM] = "imem",
-	[NVKM_SUBDEV_LTC    ] = "ltc",
-	[NVKM_SUBDEV_MC     ] = "mc",
-	[NVKM_SUBDEV_MMU    ] = "mmu",
-	[NVKM_SUBDEV_MXM    ] = "mxm",
-	[NVKM_SUBDEV_PCI    ] = "pci",
-	[NVKM_SUBDEV_PMU    ] = "pmu",
-	[NVKM_SUBDEV_THERM  ] = "therm",
-	[NVKM_SUBDEV_TIMER  ] = "tmr",
-	[NVKM_SUBDEV_VOLT   ] = "volt",
-	[NVKM_ENGINE_BSP    ] = "bsp",
-	[NVKM_ENGINE_CE0    ] = "ce0",
-	[NVKM_ENGINE_CE1    ] = "ce1",
-	[NVKM_ENGINE_CE2    ] = "ce2",
-	[NVKM_ENGINE_CIPHER ] = "cipher",
-	[NVKM_ENGINE_DISP   ] = "disp",
-	[NVKM_ENGINE_DMAOBJ ] = "dma",
-	[NVKM_ENGINE_FIFO   ] = "fifo",
-	[NVKM_ENGINE_GR     ] = "gr",
-	[NVKM_ENGINE_IFB    ] = "ifb",
-	[NVKM_ENGINE_ME     ] = "me",
-	[NVKM_ENGINE_MPEG   ] = "mpeg",
-	[NVKM_ENGINE_MSENC  ] = "msenc",
-	[NVKM_ENGINE_MSPDEC ] = "mspdec",
-	[NVKM_ENGINE_MSPPP  ] = "msppp",
-	[NVKM_ENGINE_MSVLD  ] = "msvld",
-	[NVKM_ENGINE_PM     ] = "pm",
-	[NVKM_ENGINE_SEC    ] = "sec",
-	[NVKM_ENGINE_SW     ] = "sw",
-	[NVKM_ENGINE_VIC    ] = "vic",
-	[NVKM_ENGINE_VP     ] = "vp",
+	[NVKM_SUBDEV_BAR     ] = "bar",
+	[NVKM_SUBDEV_VBIOS   ] = "bios",
+	[NVKM_SUBDEV_BUS     ] = "bus",
+	[NVKM_SUBDEV_CLK     ] = "clk",
+	[NVKM_SUBDEV_DEVINIT ] = "devinit",
+	[NVKM_SUBDEV_FB      ] = "fb",
+	[NVKM_SUBDEV_FUSE    ] = "fuse",
+	[NVKM_SUBDEV_GPIO    ] = "gpio",
+	[NVKM_SUBDEV_I2C     ] = "i2c",
+	[NVKM_SUBDEV_IBUS    ] = "priv",
+	[NVKM_SUBDEV_ICCSENSE] = "iccsense",
+	[NVKM_SUBDEV_INSTMEM ] = "imem",
+	[NVKM_SUBDEV_LTC     ] = "ltc",
+	[NVKM_SUBDEV_MC      ] = "mc",
+	[NVKM_SUBDEV_MMU     ] = "mmu",
+	[NVKM_SUBDEV_MXM     ] = "mxm",
+	[NVKM_SUBDEV_PCI     ] = "pci",
+	[NVKM_SUBDEV_PMU     ] = "pmu",
+	[NVKM_SUBDEV_SECBOOT ] = "secboot",
+	[NVKM_SUBDEV_THERM   ] = "therm",
+	[NVKM_SUBDEV_TIMER   ] = "tmr",
+	[NVKM_SUBDEV_VOLT    ] = "volt",
+	[NVKM_ENGINE_BSP     ] = "bsp",
+	[NVKM_ENGINE_CE0     ] = "ce0",
+	[NVKM_ENGINE_CE1     ] = "ce1",
+	[NVKM_ENGINE_CE2     ] = "ce2",
+	[NVKM_ENGINE_CIPHER  ] = "cipher",
+	[NVKM_ENGINE_DISP    ] = "disp",
+	[NVKM_ENGINE_DMAOBJ  ] = "dma",
+	[NVKM_ENGINE_FIFO    ] = "fifo",
+	[NVKM_ENGINE_GR      ] = "gr",
+	[NVKM_ENGINE_IFB     ] = "ifb",
+	[NVKM_ENGINE_ME      ] = "me",
+	[NVKM_ENGINE_MPEG    ] = "mpeg",
+	[NVKM_ENGINE_MSENC   ] = "msenc",
+	[NVKM_ENGINE_MSPDEC  ] = "mspdec",
+	[NVKM_ENGINE_MSPPP   ] = "msppp",
+	[NVKM_ENGINE_MSVLD   ] = "msvld",
+	[NVKM_ENGINE_NVENC0  ] = "nvenc0",
+	[NVKM_ENGINE_NVENC1  ] = "nvenc1",
+	[NVKM_ENGINE_NVDEC   ] = "nvdec",
+	[NVKM_ENGINE_PM      ] = "pm",
+	[NVKM_ENGINE_SEC     ] = "sec",
+	[NVKM_ENGINE_SW      ] = "sw",
+	[NVKM_ENGINE_VIC     ] = "vic",
+	[NVKM_ENGINE_VP      ] = "vp",
 };
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 36f7247..c2c8d2ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -10,10 +10,14 @@
 include $(src)/nvkm/engine/fifo/Kbuild
 include $(src)/nvkm/engine/gr/Kbuild
 include $(src)/nvkm/engine/mpeg/Kbuild
+include $(src)/nvkm/engine/msenc/Kbuild
 include $(src)/nvkm/engine/mspdec/Kbuild
 include $(src)/nvkm/engine/msppp/Kbuild
 include $(src)/nvkm/engine/msvld/Kbuild
+include $(src)/nvkm/engine/nvenc/Kbuild
+include $(src)/nvkm/engine/nvdec/Kbuild
 include $(src)/nvkm/engine/pm/Kbuild
 include $(src)/nvkm/engine/sec/Kbuild
 include $(src)/nvkm/engine/sw/Kbuild
+include $(src)/nvkm/engine/vic/Kbuild
 include $(src)/nvkm/engine/vp/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index fa8cda7..9c19d59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -1,4 +1,5 @@
 nvkm-y += nvkm/engine/ce/gt215.o
 nvkm-y += nvkm/engine/ce/gf100.o
 nvkm-y += nvkm/engine/ce/gk104.o
-nvkm-y += nvkm/engine/ce/gm204.o
+nvkm-y += nvkm/engine/ce/gm107.o
+nvkm-y += nvkm/engine/ce/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
similarity index 84%
copy from drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
copy to drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
index 8eaa72a..4c2f429 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2016 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,28 +26,29 @@
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
-gm204_ce = {
+gm107_ce = {
 	.intr = gk104_ce_intr,
 	.sclass = {
+		{ -1, -1, KEPLER_DMA_COPY_A },
 		{ -1, -1, MAXWELL_DMA_COPY_A },
 		{}
 	}
 };
 
 int
-gm204_ce_new(struct nvkm_device *device, int index,
+gm107_ce_new(struct nvkm_device *device, int index,
 	     struct nvkm_engine **pengine)
 {
 	if (index == NVKM_ENGINE_CE0) {
-		return nvkm_engine_new_(&gm204_ce, device, index,
+		return nvkm_engine_new_(&gm107_ce, device, index,
 					0x00000040, true, pengine);
 	} else
 	if (index == NVKM_ENGINE_CE1) {
-		return nvkm_engine_new_(&gm204_ce, device, index,
+		return nvkm_engine_new_(&gm107_ce, device, index,
 					0x00000080, true, pengine);
 	} else
 	if (index == NVKM_ENGINE_CE2) {
-		return nvkm_engine_new_(&gm204_ce, device, index,
+		return nvkm_engine_new_(&gm107_ce, device, index,
 					0x00200000, true, pengine);
 	}
 	return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
similarity index 87%
rename from drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
rename to drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
index 8eaa72a..13f07b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
@@ -26,7 +26,7 @@
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
-gm204_ce = {
+gm200_ce = {
 	.intr = gk104_ce_intr,
 	.sclass = {
 		{ -1, -1, MAXWELL_DMA_COPY_A },
@@ -35,19 +35,19 @@
 };
 
 int
-gm204_ce_new(struct nvkm_device *device, int index,
+gm200_ce_new(struct nvkm_device *device, int index,
 	     struct nvkm_engine **pengine)
 {
 	if (index == NVKM_ENGINE_CE0) {
-		return nvkm_engine_new_(&gm204_ce, device, index,
+		return nvkm_engine_new_(&gm200_ce, device, index,
 					0x00000040, true, pengine);
 	} else
 	if (index == NVKM_ENGINE_CE1) {
-		return nvkm_engine_new_(&gm204_ce, device, index,
+		return nvkm_engine_new_(&gm200_ce, device, index,
 					0x00000080, true, pengine);
 	} else
 	if (index == NVKM_ENGINE_CE2) {
-		return nvkm_engine_new_(&gm204_ce, device, index,
+		return nvkm_engine_new_(&gm200_ce, device, index,
 					0x00200000, true, pengine);
 	}
 	return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b1ba1c7..9f32c87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1347,6 +1347,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.ibus = gf100_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1383,6 +1384,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.ibus = gf100_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1418,6 +1420,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.ibus = gf100_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1453,6 +1456,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.ibus = gf100_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1489,6 +1493,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.ibus = gf100_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1525,6 +1530,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.ibus = gf100_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1561,6 +1567,7 @@
 	.gpio = g94_gpio_new,
 	.i2c = g94_i2c_new,
 	.ibus = gf100_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1596,6 +1603,7 @@
 	.gpio = gf119_gpio_new,
 	.i2c = gf117_i2c_new,
 	.ibus = gf117_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1629,6 +1637,7 @@
 	.gpio = gf119_gpio_new,
 	.i2c = gf119_i2c_new,
 	.ibus = gf117_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gf100_ltc_new,
 	.mc = gf100_mc_new,
@@ -1664,6 +1673,7 @@
 	.gpio = gk104_gpio_new,
 	.i2c = gk104_i2c_new,
 	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gf100_mc_new,
@@ -1701,6 +1711,7 @@
 	.gpio = gk104_gpio_new,
 	.i2c = gk104_i2c_new,
 	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gf100_mc_new,
@@ -1738,6 +1749,7 @@
 	.gpio = gk104_gpio_new,
 	.i2c = gk104_i2c_new,
 	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gf100_mc_new,
@@ -1799,6 +1811,7 @@
 	.gpio = gk104_gpio_new,
 	.i2c = gk104_i2c_new,
 	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gf100_mc_new,
@@ -1814,7 +1827,7 @@
 	.ce[2] = gk104_ce_new,
 	.disp = gk110_disp_new,
 	.dma = gf119_dma_new,
-	.fifo = gk104_fifo_new,
+	.fifo = gk110_fifo_new,
 	.gr = gk110_gr_new,
 	.mspdec = gk104_mspdec_new,
 	.msppp = gf100_msppp_new,
@@ -1835,6 +1848,7 @@
 	.gpio = gk104_gpio_new,
 	.i2c = gf119_i2c_new,
 	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gf100_mc_new,
@@ -1850,7 +1864,7 @@
 	.ce[2] = gk104_ce_new,
 	.disp = gk110_disp_new,
 	.dma = gf119_dma_new,
-	.fifo = gk104_fifo_new,
+	.fifo = gk110_fifo_new,
 	.gr = gk110b_gr_new,
 	.mspdec = gk104_mspdec_new,
 	.msppp = gf100_msppp_new,
@@ -1871,6 +1885,7 @@
 	.gpio = gk104_gpio_new,
 	.i2c = gk104_i2c_new,
 	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk20a_mc_new,
@@ -1907,6 +1922,7 @@
 	.gpio = gk104_gpio_new,
 	.i2c = gk104_i2c_new,
 	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
 	.mc = gk20a_mc_new,
@@ -1943,6 +1959,7 @@
 	.gpio = gk104_gpio_new,
 	.i2c = gf119_i2c_new,
 	.ibus = gk104_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
 	.ltc = gm107_ltc_new,
 	.mc = gk20a_mc_new,
@@ -1953,43 +1970,78 @@
 	.therm = gm107_therm_new,
 	.timer = gk20a_timer_new,
 	.volt = gk104_volt_new,
-	.ce[0] = gk104_ce_new,
-	.ce[2] = gk104_ce_new,
+	.ce[0] = gm107_ce_new,
+	.ce[2] = gm107_ce_new,
 	.disp = gm107_disp_new,
 	.dma = gf119_dma_new,
-	.fifo = gk208_fifo_new,
+	.fifo = gm107_fifo_new,
 	.gr = gm107_gr_new,
 	.sw = gf100_sw_new,
 };
 
 static const struct nvkm_device_chip
+nv120_chipset = {
+	.name = "GM200",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = gm200_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.iccsense = gf100_iccsense_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm200_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gk104_pci_new,
+	.pmu = gm107_pmu_new,
+	.secboot = gm200_secboot_new,
+	.timer = gk20a_timer_new,
+	.volt = gk104_volt_new,
+	.ce[0] = gm200_ce_new,
+	.ce[1] = gm200_ce_new,
+	.ce[2] = gm200_ce_new,
+	.disp = gm200_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gm200_fifo_new,
+	.gr = gm200_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
 nv124_chipset = {
 	.name = "GM204",
 	.bar = gf100_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
-	.devinit = gm204_devinit_new,
+	.devinit = gm200_devinit_new,
 	.fb = gm107_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gm204_i2c_new,
-	.ibus = gm204_ibus_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
-	.ltc = gm204_ltc_new,
+	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
 	.mmu = gf100_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
+	.secboot = gm200_secboot_new,
 	.timer = gk20a_timer_new,
 	.volt = gk104_volt_new,
-	.ce[0] = gm204_ce_new,
-	.ce[1] = gm204_ce_new,
-	.ce[2] = gm204_ce_new,
-	.disp = gm204_disp_new,
+	.ce[0] = gm200_ce_new,
+	.ce[1] = gm200_ce_new,
+	.ce[2] = gm200_ce_new,
+	.disp = gm200_disp_new,
 	.dma = gf119_dma_new,
-	.fifo = gm204_fifo_new,
-	.gr = gm204_gr_new,
+	.fifo = gm200_fifo_new,
+	.gr = gm200_gr_new,
 	.sw = gf100_sw_new,
 };
 
@@ -1999,28 +2051,30 @@
 	.bar = gf100_bar_new,
 	.bios = nvkm_bios_new,
 	.bus = gf100_bus_new,
-	.devinit = gm204_devinit_new,
+	.devinit = gm200_devinit_new,
 	.fb = gm107_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gm204_i2c_new,
-	.ibus = gm204_ibus_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
-	.ltc = gm204_ltc_new,
+	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
 	.mmu = gf100_mmu_new,
 	.mxm = nv50_mxm_new,
 	.pci = gk104_pci_new,
 	.pmu = gm107_pmu_new,
+	.secboot = gm200_secboot_new,
 	.timer = gk20a_timer_new,
 	.volt = gk104_volt_new,
-	.ce[0] = gm204_ce_new,
-	.ce[1] = gm204_ce_new,
-	.ce[2] = gm204_ce_new,
-	.disp = gm204_disp_new,
+	.ce[0] = gm200_ce_new,
+	.ce[1] = gm200_ce_new,
+	.ce[2] = gm200_ce_new,
+	.disp = gm200_disp_new,
 	.dma = gf119_dma_new,
-	.fifo = gm204_fifo_new,
-	.gr = gm206_gr_new,
+	.fifo = gm200_fifo_new,
+	.gr = gm200_gr_new,
 	.sw = gf100_sw_new,
 };
 
@@ -2029,15 +2083,18 @@
 	.name = "GM20B",
 	.bar = gk20a_bar_new,
 	.bus = gf100_bus_new,
+	.clk = gm20b_clk_new,
 	.fb = gk20a_fb_new,
 	.fuse = gm107_fuse_new,
 	.ibus = gk20a_ibus_new,
 	.imem = gk20a_instmem_new,
-	.ltc = gm204_ltc_new,
+	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
 	.mmu = gf100_mmu_new,
+	.secboot = gm20b_secboot_new,
 	.timer = gk20a_timer_new,
-	.ce[2] = gm204_ce_new,
+	.ce[2] = gm200_ce_new,
+	.volt = gm20b_volt_new,
 	.dma = gf119_dma_new,
 	.fifo = gm20b_fifo_new,
 	.gr = gm20b_gr_new,
@@ -2072,26 +2129,28 @@
 
 	switch (index) {
 #define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
-	_(BAR    , device->bar    , &device->bar->subdev);
-	_(VBIOS  , device->bios   , &device->bios->subdev);
-	_(BUS    , device->bus    , &device->bus->subdev);
-	_(CLK    , device->clk    , &device->clk->subdev);
-	_(DEVINIT, device->devinit, &device->devinit->subdev);
-	_(FB     , device->fb     , &device->fb->subdev);
-	_(FUSE   , device->fuse   , &device->fuse->subdev);
-	_(GPIO   , device->gpio   , &device->gpio->subdev);
-	_(I2C    , device->i2c    , &device->i2c->subdev);
-	_(IBUS   , device->ibus   ,  device->ibus);
-	_(INSTMEM, device->imem   , &device->imem->subdev);
-	_(LTC    , device->ltc    , &device->ltc->subdev);
-	_(MC     , device->mc     , &device->mc->subdev);
-	_(MMU    , device->mmu    , &device->mmu->subdev);
-	_(MXM    , device->mxm    ,  device->mxm);
-	_(PCI    , device->pci    , &device->pci->subdev);
-	_(PMU    , device->pmu    , &device->pmu->subdev);
-	_(THERM  , device->therm  , &device->therm->subdev);
-	_(TIMER  , device->timer  , &device->timer->subdev);
-	_(VOLT   , device->volt   , &device->volt->subdev);
+	_(BAR     , device->bar     , &device->bar->subdev);
+	_(VBIOS   , device->bios    , &device->bios->subdev);
+	_(BUS     , device->bus     , &device->bus->subdev);
+	_(CLK     , device->clk     , &device->clk->subdev);
+	_(DEVINIT , device->devinit , &device->devinit->subdev);
+	_(FB      , device->fb      , &device->fb->subdev);
+	_(FUSE    , device->fuse    , &device->fuse->subdev);
+	_(GPIO    , device->gpio    , &device->gpio->subdev);
+	_(I2C     , device->i2c     , &device->i2c->subdev);
+	_(IBUS    , device->ibus    ,  device->ibus);
+	_(ICCSENSE, device->iccsense, &device->iccsense->subdev);
+	_(INSTMEM , device->imem    , &device->imem->subdev);
+	_(LTC     , device->ltc     , &device->ltc->subdev);
+	_(MC      , device->mc      , &device->mc->subdev);
+	_(MMU     , device->mmu     , &device->mmu->subdev);
+	_(MXM     , device->mxm     ,  device->mxm);
+	_(PCI     , device->pci     , &device->pci->subdev);
+	_(PMU     , device->pmu     , &device->pmu->subdev);
+	_(SECBOOT , device->secboot , &device->secboot->subdev);
+	_(THERM   , device->therm   , &device->therm->subdev);
+	_(TIMER   , device->timer   , &device->timer->subdev);
+	_(VOLT    , device->volt    , &device->volt->subdev);
 #undef _
 	default:
 		engine = nvkm_device_engine(device, index);
@@ -2110,27 +2169,30 @@
 
 	switch (index) {
 #define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
-	_(BSP    , device->bsp    ,  device->bsp);
-	_(CE0    , device->ce[0]  ,  device->ce[0]);
-	_(CE1    , device->ce[1]  ,  device->ce[1]);
-	_(CE2    , device->ce[2]  ,  device->ce[2]);
-	_(CIPHER , device->cipher ,  device->cipher);
-	_(DISP   , device->disp   , &device->disp->engine);
-	_(DMAOBJ , device->dma    , &device->dma->engine);
-	_(FIFO   , device->fifo   , &device->fifo->engine);
-	_(GR     , device->gr     , &device->gr->engine);
-	_(IFB    , device->ifb    ,  device->ifb);
-	_(ME     , device->me     ,  device->me);
-	_(MPEG   , device->mpeg   ,  device->mpeg);
-	_(MSENC  , device->msenc  ,  device->msenc);
-	_(MSPDEC , device->mspdec ,  device->mspdec);
-	_(MSPPP  , device->msppp  ,  device->msppp);
-	_(MSVLD  , device->msvld  ,  device->msvld);
-	_(PM     , device->pm     , &device->pm->engine);
-	_(SEC    , device->sec    ,  device->sec);
-	_(SW     , device->sw     , &device->sw->engine);
-	_(VIC    , device->vic    ,  device->vic);
-	_(VP     , device->vp     ,  device->vp);
+	_(BSP    , device->bsp     ,  device->bsp);
+	_(CE0    , device->ce[0]   ,  device->ce[0]);
+	_(CE1    , device->ce[1]   ,  device->ce[1]);
+	_(CE2    , device->ce[2]   ,  device->ce[2]);
+	_(CIPHER , device->cipher  ,  device->cipher);
+	_(DISP   , device->disp    , &device->disp->engine);
+	_(DMAOBJ , device->dma     , &device->dma->engine);
+	_(FIFO   , device->fifo    , &device->fifo->engine);
+	_(GR     , device->gr      , &device->gr->engine);
+	_(IFB    , device->ifb     ,  device->ifb);
+	_(ME     , device->me      ,  device->me);
+	_(MPEG   , device->mpeg    ,  device->mpeg);
+	_(MSENC  , device->msenc   ,  device->msenc);
+	_(MSPDEC , device->mspdec  ,  device->mspdec);
+	_(MSPPP  , device->msppp   ,  device->msppp);
+	_(MSVLD  , device->msvld   ,  device->msvld);
+	_(NVENC0 , device->nvenc[0],  device->nvenc[0]);
+	_(NVENC1 , device->nvenc[1],  device->nvenc[1]);
+	_(NVDEC  , device->nvdec   ,  device->nvdec);
+	_(PM     , device->pm      , &device->pm->engine);
+	_(SEC    , device->sec     ,  device->sec);
+	_(SW     , device->sw      , &device->sw->engine);
+	_(VIC    , device->vic     ,  device->vic);
+	_(VP     , device->vp      ,  device->vp);
 #undef _
 	default:
 		WARN_ON(1);
@@ -2261,6 +2323,8 @@
 	} while (--i >= 0);
 
 fail:
+	nvkm_device_fini(device, false);
+
 	nvdev_error(device, "init failed with %d\n", ret);
 	return ret;
 }
@@ -2459,6 +2523,7 @@
 		case 0x106: device->chip = &nv106_chipset; break;
 		case 0x108: device->chip = &nv108_chipset; break;
 		case 0x117: device->chip = &nv117_chipset; break;
+		case 0x120: device->chip = &nv120_chipset; break;
 		case 0x124: device->chip = &nv124_chipset; break;
 		case 0x126: device->chip = &nv126_chipset; break;
 		case 0x12b: device->chip = &nv12b_chipset; break;
@@ -2518,47 +2583,52 @@
 	}                                                                      \
 	break
 		switch (i) {
-		_(NVKM_SUBDEV_BAR    ,     bar);
-		_(NVKM_SUBDEV_VBIOS  ,    bios);
-		_(NVKM_SUBDEV_BUS    ,     bus);
-		_(NVKM_SUBDEV_CLK    ,     clk);
-		_(NVKM_SUBDEV_DEVINIT, devinit);
-		_(NVKM_SUBDEV_FB     ,      fb);
-		_(NVKM_SUBDEV_FUSE   ,    fuse);
-		_(NVKM_SUBDEV_GPIO   ,    gpio);
-		_(NVKM_SUBDEV_I2C    ,     i2c);
-		_(NVKM_SUBDEV_IBUS   ,    ibus);
-		_(NVKM_SUBDEV_INSTMEM,    imem);
-		_(NVKM_SUBDEV_LTC    ,     ltc);
-		_(NVKM_SUBDEV_MC     ,      mc);
-		_(NVKM_SUBDEV_MMU    ,     mmu);
-		_(NVKM_SUBDEV_MXM    ,     mxm);
-		_(NVKM_SUBDEV_PCI    ,     pci);
-		_(NVKM_SUBDEV_PMU    ,     pmu);
-		_(NVKM_SUBDEV_THERM  ,   therm);
-		_(NVKM_SUBDEV_TIMER  ,   timer);
-		_(NVKM_SUBDEV_VOLT   ,    volt);
-		_(NVKM_ENGINE_BSP    ,     bsp);
-		_(NVKM_ENGINE_CE0    ,   ce[0]);
-		_(NVKM_ENGINE_CE1    ,   ce[1]);
-		_(NVKM_ENGINE_CE2    ,   ce[2]);
-		_(NVKM_ENGINE_CIPHER ,  cipher);
-		_(NVKM_ENGINE_DISP   ,    disp);
-		_(NVKM_ENGINE_DMAOBJ ,     dma);
-		_(NVKM_ENGINE_FIFO   ,    fifo);
-		_(NVKM_ENGINE_GR     ,      gr);
-		_(NVKM_ENGINE_IFB    ,     ifb);
-		_(NVKM_ENGINE_ME     ,      me);
-		_(NVKM_ENGINE_MPEG   ,    mpeg);
-		_(NVKM_ENGINE_MSENC  ,   msenc);
-		_(NVKM_ENGINE_MSPDEC ,  mspdec);
-		_(NVKM_ENGINE_MSPPP  ,   msppp);
-		_(NVKM_ENGINE_MSVLD  ,   msvld);
-		_(NVKM_ENGINE_PM     ,      pm);
-		_(NVKM_ENGINE_SEC    ,     sec);
-		_(NVKM_ENGINE_SW     ,      sw);
-		_(NVKM_ENGINE_VIC    ,     vic);
-		_(NVKM_ENGINE_VP     ,      vp);
+		_(NVKM_SUBDEV_BAR     ,      bar);
+		_(NVKM_SUBDEV_VBIOS   ,     bios);
+		_(NVKM_SUBDEV_BUS     ,      bus);
+		_(NVKM_SUBDEV_CLK     ,      clk);
+		_(NVKM_SUBDEV_DEVINIT ,  devinit);
+		_(NVKM_SUBDEV_FB      ,       fb);
+		_(NVKM_SUBDEV_FUSE    ,     fuse);
+		_(NVKM_SUBDEV_GPIO    ,     gpio);
+		_(NVKM_SUBDEV_I2C     ,      i2c);
+		_(NVKM_SUBDEV_IBUS    ,     ibus);
+		_(NVKM_SUBDEV_ICCSENSE, iccsense);
+		_(NVKM_SUBDEV_INSTMEM ,     imem);
+		_(NVKM_SUBDEV_LTC     ,      ltc);
+		_(NVKM_SUBDEV_MC      ,       mc);
+		_(NVKM_SUBDEV_MMU     ,      mmu);
+		_(NVKM_SUBDEV_MXM     ,      mxm);
+		_(NVKM_SUBDEV_PCI     ,      pci);
+		_(NVKM_SUBDEV_PMU     ,      pmu);
+		_(NVKM_SUBDEV_SECBOOT ,  secboot);
+		_(NVKM_SUBDEV_THERM   ,    therm);
+		_(NVKM_SUBDEV_TIMER   ,    timer);
+		_(NVKM_SUBDEV_VOLT    ,     volt);
+		_(NVKM_ENGINE_BSP     ,      bsp);
+		_(NVKM_ENGINE_CE0     ,    ce[0]);
+		_(NVKM_ENGINE_CE1     ,    ce[1]);
+		_(NVKM_ENGINE_CE2     ,    ce[2]);
+		_(NVKM_ENGINE_CIPHER  ,   cipher);
+		_(NVKM_ENGINE_DISP    ,     disp);
+		_(NVKM_ENGINE_DMAOBJ  ,      dma);
+		_(NVKM_ENGINE_FIFO    ,     fifo);
+		_(NVKM_ENGINE_GR      ,       gr);
+		_(NVKM_ENGINE_IFB     ,      ifb);
+		_(NVKM_ENGINE_ME      ,       me);
+		_(NVKM_ENGINE_MPEG    ,     mpeg);
+		_(NVKM_ENGINE_MSENC   ,    msenc);
+		_(NVKM_ENGINE_MSPDEC  ,   mspdec);
+		_(NVKM_ENGINE_MSPPP   ,    msppp);
+		_(NVKM_ENGINE_MSVLD   ,    msvld);
+		_(NVKM_ENGINE_NVENC0  , nvenc[0]);
+		_(NVKM_ENGINE_NVENC1  , nvenc[1]);
+		_(NVKM_ENGINE_NVDEC   ,    nvdec);
+		_(NVKM_ENGINE_PM      ,       pm);
+		_(NVKM_ENGINE_SEC     ,      sec);
+		_(NVKM_ENGINE_SW      ,       sw);
+		_(NVKM_ENGINE_VIC     ,      vic);
+		_(NVKM_ENGINE_VP      ,       vp);
 		default:
 			WARN_ON(1);
 			continue;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad030..18fab397 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,7 +1614,7 @@
 	.fini = nvkm_device_pci_fini,
 	.resource_addr = nvkm_device_pci_resource_addr,
 	.resource_size = nvkm_device_pci_resource_size,
-	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index ed3ad2c..e80f6ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -12,6 +12,7 @@
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/ibus.h>
+#include <subdev/iccsense.h>
 #include <subdev/instmem.h>
 #include <subdev/ltc.h>
 #include <subdev/mc.h>
@@ -22,6 +23,7 @@
 #include <subdev/therm.h>
 #include <subdev/timer.h>
 #include <subdev/volt.h>
+#include <subdev/secboot.h>
 
 #include <engine/bsp.h>
 #include <engine/ce.h>
@@ -34,9 +36,12 @@
 #include <engine/mspdec.h>
 #include <engine/msppp.h>
 #include <engine/msvld.h>
+#include <engine/nvenc.h>
+#include <engine/nvdec.h>
 #include <engine/pm.h>
 #include <engine/sec.h>
 #include <engine/sw.h>
+#include <engine/vic.h>
 #include <engine/vp.h>
 
 int  nvkm_device_ctor(const struct nvkm_device_func *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index e7e581d..9afa5f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -255,7 +255,6 @@
 
 	tdev->func = func;
 	tdev->pdev = pdev;
-	tdev->irq = -1;
 
 	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
 	if (IS_ERR(tdev->vdd)) {
@@ -281,6 +280,15 @@
 		goto free;
 	}
 
+	/*
+	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
+	 * This will be refined in nouveau_ttm_init, but we need to do it
+	 * early for instmem to behave properly.
+	 */
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
+	if (ret)
+		goto free;
+
 	nvkm_device_tegra_probe_iommu(tdev);
 
 	ret = nvkm_device_tegra_power_up(tdev);
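Side note on the tegra.c hunk above: constraining the DMA mask before instmem comes up is the whole point of the change. A minimal stand-alone sketch of the pattern follows; the probe function and the 34-bit width are assumptions for illustration only, not values taken from the driver:

  #include <linux/dma-mapping.h>
  #include <linux/platform_device.h>

  /* Hypothetical probe helper: cap the device's DMA addressing before
   * any allocations happen, so early users (such as instmem) only see
   * addresses the GPU can actually reach. */
  static int example_probe(struct platform_device *pdev)
  {
  	/* 34 is an assumed IOMMU bit width, for illustration only. */
  	int ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(34));

  	if (ret)
  		return ret;
  	return 0;
  }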
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 04f6045..a74c5dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -9,7 +9,7 @@
 nvkm-y += nvkm/engine/disp/gk104.o
 nvkm-y += nvkm/engine/disp/gk110.o
 nvkm-y += nvkm/engine/disp/gm107.o
-nvkm-y += nvkm/engine/disp/gm204.o
+nvkm-y += nvkm/engine/disp/gm200.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/outpdp.o
@@ -18,7 +18,7 @@
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
 nvkm-y += nvkm/engine/disp/sorgf119.o
-nvkm-y += nvkm/engine/disp/sorgm204.o
+nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/dport.o
 
 nvkm-y += nvkm/engine/disp/conn.o
@@ -43,7 +43,7 @@
 nvkm-y += nvkm/engine/disp/rootgk104.o
 nvkm-y += nvkm/engine/disp/rootgk110.o
 nvkm-y += nvkm/engine/disp/rootgm107.o
-nvkm-y += nvkm/engine/disp/rootgm204.o
+nvkm-y += nvkm/engine/disp/rootgm200.o
 
 nvkm-y += nvkm/engine/disp/channv50.o
 nvkm-y += nvkm/engine/disp/changf119.o
@@ -68,7 +68,7 @@
 nvkm-y += nvkm/engine/disp/coregk104.o
 nvkm-y += nvkm/engine/disp/coregk110.o
 nvkm-y += nvkm/engine/disp/coregm107.o
-nvkm-y += nvkm/engine/disp/coregm204.o
+nvkm-y += nvkm/engine/disp/coregm200.o
 
 nvkm-y += nvkm/engine/disp/ovlynv50.o
 nvkm-y += nvkm/engine/disp/ovlyg84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
similarity index 94%
rename from drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
rename to drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
index 222f4a8..bb23a86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
@@ -27,8 +27,8 @@
 #include <nvif/class.h>
 
 const struct nv50_disp_dmac_oclass
-gm204_disp_core_oclass = {
-	.base.oclass = GM204_DISP_CORE_CHANNEL_DMA,
+gm200_disp_core_oclass = {
+	.base.oclass = GM200_DISP_CORE_CHANNEL_DMA,
 	.base.minver = 0,
 	.base.maxver = 0,
 	.ctor = nv50_disp_core_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index c748ca2..fc84eb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -87,5 +87,5 @@
 
 extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
 
-extern const struct nv50_disp_dmac_oclass gm204_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
similarity index 87%
rename from drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
rename to drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 30f1987..67eec86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -25,18 +25,18 @@
 #include "rootnv50.h"
 
 static const struct nv50_disp_func
-gm204_disp = {
+gm200_disp = {
 	.intr = gf119_disp_intr,
 	.uevent = &gf119_disp_chan_uevent,
 	.super = gf119_disp_intr_supervisor,
-	.root = &gm204_disp_root_oclass,
+	.root = &gm200_disp_root_oclass,
 	.head.vblank_init = gf119_disp_vblank_init,
 	.head.vblank_fini = gf119_disp_vblank_fini,
 	.head.scanoutpos = gf119_disp_root_scanoutpos,
 	.outp.internal.crt = nv50_dac_output_new,
 	.outp.internal.tmds = nv50_sor_output_new,
 	.outp.internal.lvds = nv50_sor_output_new,
-	.outp.internal.dp = gm204_sor_dp_new,
+	.outp.internal.dp = gm200_sor_dp_new,
 	.dac.nr = 3,
 	.dac.power = nv50_dac_power,
 	.dac.sense = nv50_dac_sense,
@@ -44,11 +44,11 @@
 	.sor.power = nv50_sor_power,
 	.sor.hda_eld = gf119_hda_eld,
 	.sor.hdmi = gk104_hdmi_ctrl,
-	.sor.magic = gm204_sor_magic,
+	.sor.magic = gm200_sor_magic,
 };
 
 int
-gm204_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	return gf119_disp_new_(&gm204_disp, device, index, pdisp);
+	return gf119_disp_new_(&gm200_disp, device, index, pdisp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 2590fec..0772719 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -42,7 +42,7 @@
 
 u32 g94_sor_dp_lane_map(struct nvkm_device *, u8 lane);
 
-void gm204_sor_magic(struct nvkm_output *outp);
+void gm200_sor_magic(struct nvkm_output *outp);
 
 #define OUTP_MSG(o,l,f,a...) do {                                              \
 	struct nvkm_output *_outp = (o);                                       \
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 731136d..e9067ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -63,6 +63,6 @@
 		     struct nvkm_output **);
 int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
 
-int  gm204_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+int  gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
 		      struct nvkm_output **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
similarity index 86%
rename from drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
rename to drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 168bffe..38f5ee1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -27,11 +27,11 @@
 #include <nvif/class.h>
 
 static const struct nv50_disp_root_func
-gm204_disp_root = {
+gm200_disp_root = {
 	.init = gf119_disp_root_init,
 	.fini = gf119_disp_root_fini,
 	.dmac = {
-		&gm204_disp_core_oclass,
+		&gm200_disp_core_oclass,
 		&gk110_disp_base_oclass,
 		&gk104_disp_ovly_oclass,
 	},
@@ -42,17 +42,17 @@
 };
 
 static int
-gm204_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+gm200_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
 		    void *data, u32 size, struct nvkm_object **pobject)
 {
-	return nv50_disp_root_new_(&gm204_disp_root, disp, oclass,
+	return nv50_disp_root_new_(&gm200_disp_root, disp, oclass,
 				   data, size, pobject);
 }
 
 const struct nvkm_disp_oclass
-gm204_disp_root_oclass = {
-	.base.oclass = GM204_DISP,
+gm200_disp_root_oclass = {
+	.base.oclass = GM200_DISP,
 	.base.minver = -1,
 	.base.maxver = -1,
-	.ctor = gm204_disp_root_new,
+	.ctor = gm200_disp_root_new,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 5b2c903..cb449ed8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -39,5 +39,5 @@
 extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
 extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
 extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
-extern const struct nvkm_disp_oclass gm204_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
similarity index 79%
rename from drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
rename to drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 029e5f1..2cfbef9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -27,19 +27,19 @@
 #include <subdev/timer.h>
 
 static inline u32
-gm204_sor_soff(struct nvkm_output_dp *outp)
+gm200_sor_soff(struct nvkm_output_dp *outp)
 {
 	return (ffs(outp->base.info.or) - 1) * 0x800;
 }
 
 static inline u32
-gm204_sor_loff(struct nvkm_output_dp *outp)
+gm200_sor_loff(struct nvkm_output_dp *outp)
 {
-	return gm204_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+	return gm200_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
 }
 
 void
-gm204_sor_magic(struct nvkm_output *outp)
+gm200_sor_magic(struct nvkm_output *outp)
 {
 	struct nvkm_device *device = outp->disp->engine.subdev.device;
 	const u32 soff = outp->or * 0x100;
@@ -51,16 +51,16 @@
 }
 
 static inline u32
-gm204_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
+gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
 {
 	return lane * 0x08;
 }
 
 static int
-gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
 	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-	const u32 soff = gm204_sor_soff(outp);
+	const u32 soff = gm200_sor_soff(outp);
 	const u32 data = 0x01010101 * pattern;
 	if (outp->base.info.sorconf.link & 1)
 		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
@@ -70,15 +70,15 @@
 }
 
 static int
-gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
 	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-	const u32 soff = gm204_sor_soff(outp);
-	const u32 loff = gm204_sor_loff(outp);
+	const u32 soff = gm200_sor_soff(outp);
+	const u32 loff = gm200_sor_loff(outp);
 	u32 mask = 0, i;
 
 	for (i = 0; i < nr; i++)
-		mask |= 1 << (gm204_sor_dp_lane_map(device, i) >> 3);
+		mask |= 1 << (gm200_sor_dp_lane_map(device, i) >> 3);
 
 	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
 	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
@@ -90,13 +90,13 @@
 }
 
 static int
-gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 		     int ln, int vs, int pe, int pc)
 {
 	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	struct nvkm_bios *bios = device->bios;
-	const u32 shift = gm204_sor_dp_lane_map(device, ln);
-	const u32 loff = gm204_sor_loff(outp);
+	const u32 shift = gm200_sor_dp_lane_map(device, ln);
+	const u32 loff = gm200_sor_loff(outp);
 	u32 addr, data[4];
 	u8  ver, hdr, cnt, len;
 	struct nvbios_dpout info;
@@ -128,16 +128,16 @@
 }
 
 static const struct nvkm_output_dp_func
-gm204_sor_dp_func = {
-	.pattern = gm204_sor_dp_pattern,
-	.lnk_pwr = gm204_sor_dp_lnk_pwr,
+gm200_sor_dp_func = {
+	.pattern = gm200_sor_dp_pattern,
+	.lnk_pwr = gm200_sor_dp_lnk_pwr,
 	.lnk_ctl = gf119_sor_dp_lnk_ctl,
-	.drv_ctl = gm204_sor_dp_drv_ctl,
+	.drv_ctl = gm200_sor_dp_drv_ctl,
 };
 
 int
-gm204_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+gm200_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
 		 struct nvkm_output **poutp)
 {
-	return nvkm_output_dp_new_(&gm204_sor_dp_func, disp, index, dcbE, poutp);
+	return nvkm_output_dp_new_(&gm200_sor_dp_func, disp, index, dcbE, poutp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 74993c1..65e5d29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -7,9 +7,11 @@
 nvkm-y += nvkm/engine/fifo/g84.o
 nvkm-y += nvkm/engine/fifo/gf100.o
 nvkm-y += nvkm/engine/fifo/gk104.o
+nvkm-y += nvkm/engine/fifo/gk110.o
 nvkm-y += nvkm/engine/fifo/gk208.o
 nvkm-y += nvkm/engine/fifo/gk20a.o
-nvkm-y += nvkm/engine/fifo/gm204.o
+nvkm-y += nvkm/engine/fifo/gm107.o
+nvkm-y += nvkm/engine/fifo/gm200.o
 nvkm-y += nvkm/engine/fifo/gm20b.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
@@ -27,4 +29,5 @@
 nvkm-y += nvkm/engine/fifo/gpfifog84.o
 nvkm-y += nvkm/engine/fifo/gpfifogf100.o
 nvkm-y += nvkm/engine/fifo/gpfifogk104.o
-nvkm-y += nvkm/engine/fifo/gpfifogm204.o
+nvkm-y += nvkm/engine/fifo/gpfifogk110.o
+nvkm-y += nvkm/engine/fifo/gpfifogm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 97bdddb..e06f4d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -7,7 +7,7 @@
 struct gk104_fifo_chan {
 	struct nvkm_fifo_chan base;
 	struct gk104_fifo *fifo;
-	int engine;
+	int runl;
 
 	struct list_head head;
 	bool killed;
@@ -25,5 +25,6 @@
 			  void *data, u32 size, struct nvkm_object **);
 
 extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 36a39c7..352a0ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -54,6 +54,7 @@
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_memory *cur;
 	int nr = 0;
+	int target;
 
 	mutex_lock(&subdev->mutex);
 	cur = fifo->runlist.mem[fifo->runlist.active];
@@ -67,7 +68,10 @@
 	}
 	nvkm_done(cur);
 
-	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+	target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;
+
+	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
+				    (target << 28));
 	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
 
 	if (wait_event_timeout(fifo->runlist.wait,
@@ -130,9 +134,9 @@
 }
 
 static void
-gf100_fifo_recover_work(struct work_struct *work)
+gf100_fifo_recover_work(struct work_struct *w)
 {
-	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
+	struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	unsigned long flags;
@@ -140,15 +144,15 @@
 	u64 mask, todo;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
-	mask = fifo->mask;
-	fifo->mask = 0ULL;
+	mask = fifo->recover.mask;
+	fifo->recover.mask = 0ULL;
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
-	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
+	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
 		engm |= 1 << gf100_fifo_engidx(fifo, engn);
 	nvkm_mask(device, 0x002630, engm, engm);
 
-	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
+	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
 		if ((engine = nvkm_device_engine(device, engn))) {
 			nvkm_subdev_fini(&engine->subdev, false);
 			WARN_ON(nvkm_subdev_init(&engine->subdev));
@@ -176,8 +180,8 @@
 	list_del_init(&chan->head);
 	chan->killed = true;
 
-	fifo->mask |= 1ULL << engine->subdev.index;
-	schedule_work(&fifo->fault);
+	fifo->recover.mask |= 1ULL << engine->subdev.index;
+	schedule_work(&fifo->recover.work);
 }
 
 static const struct nvkm_enum
@@ -330,7 +334,7 @@
 		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
 	}
 
-	if (eu) {
+	if (eu && eu->data2) {
 		switch (eu->data2) {
 		case NVKM_SUBDEV_BAR:
 			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
@@ -544,9 +548,15 @@
 gf100_fifo_oneinit(struct nvkm_fifo *base)
 {
 	struct gf100_fifo *fifo = gf100_fifo(base);
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int ret;
 
+	/* Determine number of PBDMAs by checking valid enable bits. */
+	nvkm_wr32(device, 0x002204, 0xffffffff);
+	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
+	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
+
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
 			      false, &fifo->runlist.mem[0]);
 	if (ret)
@@ -576,25 +587,22 @@
 gf100_fifo_fini(struct nvkm_fifo *base)
 {
 	struct gf100_fifo *fifo = gf100_fifo(base);
-	flush_work(&fifo->fault);
+	flush_work(&fifo->recover.work);
 }
 
 static void
 gf100_fifo_init(struct nvkm_fifo *base)
 {
 	struct gf100_fifo *fifo = gf100_fifo(base);
-	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int i;
 
-	nvkm_wr32(device, 0x000204, 0xffffffff);
-	nvkm_wr32(device, 0x002204, 0xffffffff);
+	/* Enable PBDMAs. */
+	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+	nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);
 
-	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
-	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
-
-	/* assign engines to PBDMAs */
-	if (fifo->spoon_nr >= 3) {
+	/* Assign engines to PBDMAs. */
+	if (fifo->pbdma_nr >= 3) {
 		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
 		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
 		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
@@ -604,7 +612,7 @@
 	}
 
 	/* PBDMA[n] */
-	for (i = 0; i < fifo->spoon_nr; i++) {
+	for (i = 0; i < fifo->pbdma_nr; i++) {
 		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
 		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
 		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
@@ -652,7 +660,7 @@
 	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
 		return -ENOMEM;
 	INIT_LIST_HEAD(&fifo->chan);
-	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
+	INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
 	*pfifo = &fifo->base;
 
 	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
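For reference, the runlist-commit hunk above folds an aperture target into the register at 0x002270 alongside the page-shifted runlist address. A hedged sketch of that encoding, assuming the bit layout shown in the hunk (the helper name is hypothetical):

  #include <linux/types.h>

  /* Bits 27:0 carry the runlist base >> 12 (4KiB-aligned); bits 31:28
   * select the aperture: 0x0 for VRAM, 0x3 for host memory. */
  static u32 example_runlist_base(u64 addr, bool host)
  {
  	u32 target = host ? 0x3 : 0x0;

  	return (u32)(addr >> 12) | (target << 28);
  }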
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index 08c33c3..70db58e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -11,8 +11,12 @@
 
 	struct list_head chan;
 
-	struct work_struct fault;
-	u64 mask;
+	struct {
+		struct work_struct work;
+		u64 mask;
+	} recover;
+
+	int pbdma_nr;
 
 	struct {
 		struct nvkm_memory *mem[2];
@@ -24,7 +28,6 @@
 		struct nvkm_memory *mem;
 		struct nvkm_vma bar;
 	} user;
-	int spoon_nr;
 };
 
 void gf100_fifo_intr_engine(struct gf100_fifo *);
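The recover refactor above groups the fault mask with its work item. A minimal stand-alone sketch of the pattern, using only generic kernel primitives (the names here are illustrative, not the driver's):

  #include <linux/bitops.h>
  #include <linux/kernel.h>
  #include <linux/spinlock.h>
  #include <linux/workqueue.h>

  struct example_recover {
  	spinlock_t lock;
  	struct work_struct work;
  	u64 mask;	/* one bit per engine needing recovery */
  };

  /* IRQ context records faults in 'mask' and schedules 'work';
   * process context snapshots and clears the mask under the lock,
   * then recovers each flagged engine outside of IRQ context. */
  static void example_recover_work(struct work_struct *w)
  {
  	struct example_recover *r = container_of(w, typeof(*r), work);
  	unsigned long flags, engn;
  	u64 todo;

  	spin_lock_irqsave(&r->lock, flags);
  	todo = r->mask;
  	r->mask = 0;
  	spin_unlock_irqrestore(&r->lock, flags);

  	while (todo) {
  		engn = __ffs64(todo);
  		todo &= ~BIT_ULL(engn);
  		/* re-init engine 'engn' here */
  	}
  }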
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 4fcd147d..68acb36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -47,34 +47,41 @@
 }
 
 void
-gk104_fifo_runlist_commit(struct gk104_fifo *fifo, u32 engine)
+gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 {
-	struct gk104_fifo_engn *engn = &fifo->engine[engine];
 	struct gk104_fifo_chan *chan;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_memory *cur;
+	struct nvkm_memory *mem;
 	int nr = 0;
+	int target;
 
 	mutex_lock(&subdev->mutex);
-	cur = engn->runlist[engn->cur_runlist];
-	engn->cur_runlist = !engn->cur_runlist;
+	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
+	fifo->runlist[runl].next = !fifo->runlist[runl].next;
 
-	nvkm_kmap(cur);
-	list_for_each_entry(chan, &engn->chan, head) {
-		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
-		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
+	nvkm_kmap(mem);
+	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+		nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
 		nr++;
 	}
-	nvkm_done(cur);
+	nvkm_done(mem);
 
-	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
-	nvkm_wr32(device, 0x002274, (engine << 20) | nr);
+	if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
+		target = 0;
+	else
+		target = 3;
 
-	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
-			       (engine * 0x08)) & 0x00100000),
-				msecs_to_jiffies(2000)) == 0)
-		nvkm_error(subdev, "runlist %d update timeout\n", engine);
+	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
+				    (target << 28));
+	nvkm_wr32(device, 0x002274, (runl << 20) | nr);
+
+	if (wait_event_timeout(fifo->runlist[runl].wait,
+			       !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
+				       & 0x00100000),
+			       msecs_to_jiffies(2000)) == 0)
+		nvkm_error(subdev, "runlist %d update timeout\n", runl);
 	mutex_unlock(&subdev->mutex);
 }
 
@@ -90,58 +97,51 @@
 gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
 {
 	mutex_lock(&fifo->base.engine.subdev.mutex);
-	list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+	list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
 	mutex_unlock(&fifo->base.engine.subdev.mutex);
 }
 
-static inline struct nvkm_engine *
-gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
-{
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	u64 subdevs = gk104_fifo_engine_subdev(engn);
-	if (subdevs)
-		return nvkm_device_engine(device, __ffs(subdevs));
-	return NULL;
-}
-
 static void
-gk104_fifo_recover_work(struct work_struct *work)
+gk104_fifo_recover_work(struct work_struct *w)
 {
-	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
+	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	unsigned long flags;
-	u32 engn, engm = 0;
-	u64 mask, todo;
+	u32 engm, runm, todo;
+	int engn, runl;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
-	mask = fifo->mask;
-	fifo->mask = 0ULL;
+	runm = fifo->recover.runm;
+	engm = fifo->recover.engm;
+	fifo->recover.engm = 0;
+	fifo->recover.runm = 0;
 	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
-	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
-		engm |= 1 << gk104_fifo_subdev_engine(engn);
-	nvkm_mask(device, 0x002630, engm, engm);
+	nvkm_mask(device, 0x002630, runm, runm);
 
-	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
-		if ((engine = nvkm_device_engine(device, engn))) {
+	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
+		if ((engine = fifo->engine[engn].engine)) {
 			nvkm_subdev_fini(&engine->subdev, false);
 			WARN_ON(nvkm_subdev_init(&engine->subdev));
 		}
-		gk104_fifo_runlist_commit(fifo, gk104_fifo_subdev_engine(engn));
 	}
 
-	nvkm_wr32(device, 0x00262c, engm);
-	nvkm_mask(device, 0x002630, engm, 0x00000000);
+	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
+		gk104_fifo_runlist_commit(fifo, runl);
+
+	nvkm_wr32(device, 0x00262c, runm);
+	nvkm_mask(device, 0x002630, runm, 0x00000000);
 }
 
 static void
 gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
-		  struct gk104_fifo_chan *chan)
+		   struct gk104_fifo_chan *chan)
 {
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	u32 chid = chan->base.chid;
+	int engn;
 
 	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
 		   nvkm_subdev_name[engine->subdev.index], chid);
@@ -151,8 +151,15 @@
 	list_del_init(&chan->head);
 	chan->killed = true;
 
-	fifo->mask |= 1ULL << engine->subdev.index;
-	schedule_work(&fifo->fault);
+	for (engn = 0; engn < fifo->engine_nr; engn++) {
+		if (fifo->engine[engn].engine == engine) {
+			fifo->recover.engm |= BIT(engn);
+			break;
+		}
+	}
+
+	fifo->recover.runm |= BIT(chan->runl);
+	schedule_work(&fifo->recover.work);
 }
 
 static const struct nvkm_enum
@@ -189,32 +196,31 @@
 gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	struct nvkm_engine *engine;
 	struct gk104_fifo_chan *chan;
 	unsigned long flags;
 	u32 engn;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
-	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
+	for (engn = 0; engn < fifo->engine_nr; engn++) {
+		struct nvkm_engine *engine = fifo->engine[engn].engine;
+		int runl = fifo->engine[engn].runl;
 		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
 		u32 busy = (stat & 0x80000000);
-		u32 next = (stat & 0x07ff0000) >> 16;
+		u32 next = (stat & 0x0fff0000) >> 16;
 		u32 chsw = (stat & 0x00008000);
 		u32 save = (stat & 0x00004000);
 		u32 load = (stat & 0x00002000);
-		u32 prev = (stat & 0x000007ff);
+		u32 prev = (stat & 0x00000fff);
 		u32 chid = load ? next : prev;
 		(void)save;
 
-		if (busy && chsw) {
-			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
-				if (chan->base.chid == chid) {
-					engine = gk104_fifo_engine(fifo, engn);
-					if (!engine)
-						break;
-					gk104_fifo_recover(fifo, engine, chan);
-					break;
-				}
+		if (!busy || !chsw)
+			continue;
+
+		list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+			if (chan->base.chid == chid && engine) {
+				gk104_fifo_recover(fifo, engine, chan);
+				break;
 			}
 		}
 	}
@@ -395,7 +401,7 @@
 		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
 	}
 
-	if (eu) {
+	if (eu && eu->data2) {
 		switch (eu->data2) {
 		case NVKM_SUBDEV_BAR:
 			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
@@ -484,9 +490,10 @@
 			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
 				show &= ~0x00800000;
 		}
-		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
 	}
 
+	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+
 	if (show) {
 		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
 		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
@@ -537,10 +544,10 @@
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	u32 mask = nvkm_rd32(device, 0x002a00);
 	while (mask) {
-		u32 engn = __ffs(mask);
-		wake_up(&fifo->engine[engn].wait);
-		nvkm_wr32(device, 0x002a00, 1 << engn);
-		mask &= ~(1 << engn);
+		int runl = __ffs(mask);
+		wake_up(&fifo->runlist[runl].wait);
+		nvkm_wr32(device, 0x002a00, 1 << runl);
+		mask &= ~(1 << runl);
 	}
 }
 
@@ -647,7 +654,7 @@
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	flush_work(&fifo->fault);
+	flush_work(&fifo->recover.work);
 	/* allow mmu fault interrupts, even when we're not using fifo */
 	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
 }
@@ -656,24 +663,121 @@
 gk104_fifo_oneinit(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int ret, i;
+	u32 *map;
 
-	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+	/* Determine number of PBDMAs by checking valid enable bits. */
+	nvkm_wr32(device, 0x000204, 0xffffffff);
+	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
+	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
+
+	/* Read PBDMA->runlist(s) mapping from HW. */
+	if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
+		return -ENOMEM;
+
+	for (i = 0; i < fifo->pbdma_nr; i++)
+		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
+
+	/* Read device topology from HW. */
+	for (i = 0; i < 64; i++) {
+		int type = -1, pbid = -1, engidx = -1;
+		int engn = -1, runl = -1, intr = -1, mcen = -1;
+		int fault = -1, j;
+		u32 data, addr = 0;
+
+		do {
+			data = nvkm_rd32(device, 0x022700 + (i * 0x04));
+			nvkm_trace(subdev, "%02x: %08x\n", i, data);
+			switch (data & 0x00000003) {
+			case 0x00000000: /* NOT_VALID */
+				continue;
+			case 0x00000001: /* DATA */
+				addr  = (data & 0x00fff000);
+				fault = (data & 0x000000f8) >> 3;
+				break;
+			case 0x00000002: /* ENUM */
+				if (data & 0x00000020)
+					engn = (data & 0x3c000000) >> 26;
+				if (data & 0x00000010)
+					runl = (data & 0x01e00000) >> 21;
+				if (data & 0x00000008)
+					intr = (data & 0x000f8000) >> 15;
+				if (data & 0x00000004)
+					mcen = (data & 0x00003e00) >> 9;
+				break;
+			case 0x00000003: /* ENGINE_TYPE */
+				type = (data & 0x7ffffffc) >> 2;
+				break;
+			}
+		} while ((data & 0x80000000) && ++i < 64);
+
+		if (!data)
+			continue;
+
+		/* Determine which PBDMA handles requests for this engine. */
+		for (j = 0; runl >= 0 && j < fifo->pbdma_nr; j++) {
+			if (map[j] & (1 << runl)) {
+				pbid = j;
+				break;
+			}
+		}
+
+		/* Translate engine type to NVKM engine identifier. */
+		switch (type) {
+		case 0x00000000: engidx = NVKM_ENGINE_GR; break;
+		case 0x00000001: engidx = NVKM_ENGINE_CE0; break;
+		case 0x00000002: engidx = NVKM_ENGINE_CE1; break;
+		case 0x00000003: engidx = NVKM_ENGINE_CE2; break;
+		case 0x00000008: engidx = NVKM_ENGINE_MSPDEC; break;
+		case 0x00000009: engidx = NVKM_ENGINE_MSPPP; break;
+		case 0x0000000a: engidx = NVKM_ENGINE_MSVLD; break;
+		case 0x0000000b: engidx = NVKM_ENGINE_MSENC; break;
+		case 0x0000000c: engidx = NVKM_ENGINE_VIC; break;
+		case 0x0000000d: engidx = NVKM_ENGINE_SEC; break;
+		case 0x0000000e: engidx = NVKM_ENGINE_NVENC0; break;
+		case 0x0000000f: engidx = NVKM_ENGINE_NVENC1; break;
+		case 0x00000010: engidx = NVKM_ENGINE_NVDEC; break;
+		default:
+			break;
+		}
+
+		nvkm_debug(subdev, "%02x (%8s): engine %2d runlist %2d "
+				   "pbdma %2d intr %2d reset %2d "
+				   "fault %2d addr %06x\n", type,
+			   engidx < 0 ? NULL : nvkm_subdev_name[engidx],
+			   engn, runl, pbid, intr, mcen, fault, addr);
+
+		/* Mark the engine as supported if everything checks out. */
+		if (engn >= 0 && runl >= 0) {
+			fifo->engine[engn].engine = engidx < 0 ? NULL :
+				nvkm_device_engine(device, engidx);
+			fifo->engine[engn].runl = runl;
+			fifo->engine[engn].pbid = pbid;
+			fifo->engine_nr = max(fifo->engine_nr, engn + 1);
+			fifo->runlist[runl].engm |= 1 << engn;
+			fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
+		}
+	}
+
+	kfree(map);
+
+	for (i = 0; i < fifo->runlist_nr; i++) {
 		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 				      0x8000, 0x1000, false,
-				      &fifo->engine[i].runlist[0]);
+				      &fifo->runlist[i].mem[0]);
 		if (ret)
 			return ret;
 
 		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 				      0x8000, 0x1000, false,
-				      &fifo->engine[i].runlist[1]);
+				      &fifo->runlist[i].mem[1]);
 		if (ret)
 			return ret;
 
-		init_waitqueue_head(&fifo->engine[i].wait);
-		INIT_LIST_HEAD(&fifo->engine[i].chan);
+		init_waitqueue_head(&fifo->runlist[i].wait);
+		INIT_LIST_HEAD(&fifo->runlist[i].chan);
 	}
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
@@ -695,24 +800,21 @@
 gk104_fifo_init(struct nvkm_fifo *base)
 {
 	struct gk104_fifo *fifo = gk104_fifo(base);
-	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int i;
 
-	/* enable all available PBDMA units */
-	nvkm_wr32(device, 0x000204, 0xffffffff);
-	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
-	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
+	/* Enable PBDMAs. */
+	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
 
 	/* PBDMA[n] */
-	for (i = 0; i < fifo->spoon_nr; i++) {
+	for (i = 0; i < fifo->pbdma_nr; i++) {
 		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
 		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
 		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
 	}
 
 	/* PBDMA[n].HCE */
-	for (i = 0; i < fifo->spoon_nr; i++) {
+	for (i = 0; i < fifo->pbdma_nr; i++) {
 		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
 		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
 	}
@@ -732,9 +834,9 @@
 	nvkm_vm_put(&fifo->user.bar);
 	nvkm_memory_del(&fifo->user.mem);
 
-	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
-		nvkm_memory_del(&fifo->engine[i].runlist[1]);
-		nvkm_memory_del(&fifo->engine[i].runlist[0]);
+	for (i = 0; i < fifo->runlist_nr; i++) {
+		nvkm_memory_del(&fifo->runlist[i].mem[1]);
+		nvkm_memory_del(&fifo->runlist[i].mem[0]);
 	}
 
 	return fifo;
@@ -748,7 +850,7 @@
 
 	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
 		return -ENOMEM;
-	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
+	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
 	*pfifo = &fifo->base;
 
 	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
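The oneinit rewrite above replaces hardcoded engine-to-runlist tables with parsing of the hardware's enumeration table. A condensed sketch of how those entries decode, assuming the field layout shown in the hunk (read32 is a hypothetical register accessor):

  #include <linux/types.h>

  static void example_parse_topology(u32 (*read32)(u32 addr))
  {
  	int i;

  	/* Entries at 0x022700 + i*4 form chains: bit 31 means another
  	 * word follows; the low two bits give the entry kind. */
  	for (i = 0; i < 64; i++) {
  		int engn = -1, runl = -1, type = -1;
  		u32 data;

  		do {
  			data = read32(0x022700 + (i * 0x04));
  			switch (data & 0x00000003) {
  			case 0x00000002: /* ENUM: engine/runlist ids */
  				if (data & 0x00000020)
  					engn = (data & 0x3c000000) >> 26;
  				if (data & 0x00000010)
  					runl = (data & 0x01e00000) >> 21;
  				break;
  			case 0x00000003: /* ENGINE_TYPE */
  				type = (data & 0x7ffffffc) >> 2;
  				break;
  			}
  		} while ((data & 0x80000000) && ++i < 64);

  		/* a real implementation records (engn, runl, type) */
  		(void)engn; (void)runl; (void)type;
  	}
  }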
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index bec519d..9e5d00b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -6,25 +6,37 @@
 #include <subdev/mmu.h>
 
 struct gk104_fifo_chan;
-struct gk104_fifo_engn {
-	struct nvkm_memory *runlist[2];
-	int cur_runlist;
-	wait_queue_head_t wait;
-	struct list_head chan;
-};
-
 struct gk104_fifo {
 	struct nvkm_fifo base;
 
-	struct work_struct fault;
-	u64 mask;
+	struct {
+		struct work_struct work;
+		u32 engm;
+		u32 runm;
+	} recover;
 
-	struct gk104_fifo_engn engine[7];
+	int pbdma_nr;
+
+	struct {
+		struct nvkm_engine *engine;
+		int runl;
+		int pbid;
+	} engine[16];
+	int engine_nr;
+
+	struct {
+		struct nvkm_memory *mem[2];
+		int next;
+		wait_queue_head_t wait;
+		struct list_head chan;
+		u32 engm;
+	} runlist[16];
+	int runlist_nr;
+
 	struct {
 		struct nvkm_memory *mem;
 		struct nvkm_vma bar;
 	} user;
-	int spoon_nr;
 };
 
 int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
@@ -38,7 +50,7 @@
 void gk104_fifo_uevent_fini(struct nvkm_fifo *);
 void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
-void gk104_fifo_runlist_commit(struct gk104_fifo *, u32 engine);
+void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
 
 static inline u64
 gk104_fifo_engine_subdev(int engine)
@@ -58,23 +70,4 @@
 		return 0;
 	}
 }
-
-static inline int
-gk104_fifo_subdev_engine(int subdev)
-{
-	switch (subdev) {
-	case NVKM_ENGINE_GR:
-	case NVKM_ENGINE_SW:
-	case NVKM_ENGINE_CE2   : return 0;
-	case NVKM_ENGINE_MSPDEC: return 1;
-	case NVKM_ENGINE_MSPPP : return 2;
-	case NVKM_ENGINE_MSVLD : return 3;
-	case NVKM_ENGINE_CE0   : return 4;
-	case NVKM_ENGINE_CE1   : return 5;
-	case NVKM_ENGINE_MSENC : return 6;
-	default:
-		WARN_ON(1);
-		return 0;
-	}
-}
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
similarity index 87%
copy from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
copy to drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index 2db629f..41307fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2016 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,7 +25,7 @@
 #include "changk104.h"
 
 static const struct nvkm_fifo_func
-gm204_fifo = {
+gk110_fifo = {
 	.dtor = gk104_fifo_dtor,
 	.oneinit = gk104_fifo_oneinit,
 	.init = gk104_fifo_init,
@@ -34,13 +34,13 @@
 	.uevent_init = gk104_fifo_uevent_init,
 	.uevent_fini = gk104_fifo_uevent_fini,
 	.chan = {
-		&gm204_fifo_gpfifo_oclass,
+		&gk110_fifo_gpfifo_oclass,
 		NULL
 	},
 };
 
 int
-gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gk110_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&gk110_fifo, device, index, 4096, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
similarity index 87%
copy from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
copy to drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 2db629f..6d59d65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2016 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,7 +25,7 @@
 #include "changk104.h"
 
 static const struct nvkm_fifo_func
-gm204_fifo = {
+gm107_fifo = {
 	.dtor = gk104_fifo_dtor,
 	.oneinit = gk104_fifo_oneinit,
 	.init = gk104_fifo_init,
@@ -34,13 +34,13 @@
 	.uevent_init = gk104_fifo_uevent_init,
 	.uevent_fini = gk104_fifo_uevent_fini,
 	.chan = {
-		&gm204_fifo_gpfifo_oclass,
+		&gk110_fifo_gpfifo_oclass,
 		NULL
 	},
 };
 
 int
-gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm107_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&gm107_fifo, device, index, 2048, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
similarity index 89%
rename from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
rename to drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 2db629f..4bdd430 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -25,7 +25,7 @@
 #include "changk104.h"
 
 static const struct nvkm_fifo_func
-gm204_fifo = {
+gm200_fifo = {
 	.dtor = gk104_fifo_dtor,
 	.oneinit = gk104_fifo_oneinit,
 	.init = gk104_fifo_init,
@@ -34,13 +34,13 @@
 	.uevent_init = gk104_fifo_uevent_init,
 	.uevent_fini = gk104_fifo_uevent_fini,
 	.chan = {
-		&gm204_fifo_gpfifo_oclass,
+		&gm200_fifo_gpfifo_oclass,
 		NULL
 	},
 };
 
 int
-gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm200_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
+	return gk104_fifo_new_(&gm200_fifo, device, index, 4096, pfifo);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index ae6375d..4c91d4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -32,7 +32,7 @@
 	.uevent_init = gk104_fifo_uevent_init,
 	.uevent_fini = gk104_fifo_uevent_fini,
 	.chan = {
-		&gm204_fifo_gpfifo_oclass,
+		&gm200_fifo_gpfifo_oclass,
 		NULL
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 2e1df01..ed43510 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -63,9 +63,15 @@
 	case NVKM_ENGINE_CE1   :
 	case NVKM_ENGINE_CE2   : return 0x0000;
 	case NVKM_ENGINE_GR    : return 0x0210;
+	case NVKM_ENGINE_SEC   : return 0x0220;
 	case NVKM_ENGINE_MSPDEC: return 0x0250;
 	case NVKM_ENGINE_MSPPP : return 0x0260;
 	case NVKM_ENGINE_MSVLD : return 0x0270;
+	case NVKM_ENGINE_VIC   : return 0x0280;
+	case NVKM_ENGINE_MSENC : return 0x0290;
+	case NVKM_ENGINE_NVDEC : return 0x02100270;
+	case NVKM_ENGINE_NVENC0: return 0x02100290;
+	case NVKM_ENGINE_NVENC1: return 0x0210;
 	default:
 		WARN_ON(1);
 		return 0;
@@ -76,9 +82,9 @@
 gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine, bool suspend)
 {
-	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
 	struct nvkm_gpuobj *inst = chan->base.inst;
+	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
 	int ret;
 
 	ret = gk104_fifo_gpfifo_kick(chan);
@@ -87,8 +93,12 @@
 
 	if (offset) {
 		nvkm_kmap(inst);
-		nvkm_wo32(inst, offset + 0x00, 0x00000000);
-		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_wo32(inst, (offset & 0xffff) + 0x00, 0x00000000);
+		nvkm_wo32(inst, (offset & 0xffff) + 0x04, 0x00000000);
+		if ((offset >>= 16)) {
+			nvkm_wo32(inst, offset + 0x00, 0x00000000);
+			nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		}
 		nvkm_done(inst);
 	}
 
@@ -99,15 +109,21 @@
 gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 			      struct nvkm_engine *engine)
 {
-	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
 	struct nvkm_gpuobj *inst = chan->base.inst;
+	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
 
 	if (offset) {
-		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		u64   addr = chan->engn[engine->subdev.index].vma.offset;
+		u32 datalo = lower_32_bits(addr) | 0x00000004;
+		u32 datahi = upper_32_bits(addr);
 		nvkm_kmap(inst);
-		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
-		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
+		nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
+		if ((offset >>= 16)) {
+			nvkm_wo32(inst, offset + 0x00, datalo);
+			nvkm_wo32(inst, offset + 0x04, datahi);
+		}
 		nvkm_done(inst);
 	}
 
@@ -154,7 +170,8 @@
 	if (!list_empty(&chan->head)) {
 		gk104_fifo_runlist_remove(fifo, chan);
 		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
-		gk104_fifo_runlist_commit(fifo, chan->engine);
+		gk104_fifo_gpfifo_kick(chan);
+		gk104_fifo_runlist_commit(fifo, chan->runl);
 	}
 
 	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
@@ -169,13 +186,13 @@
 	u32 addr = chan->base.inst->addr >> 12;
 	u32 coff = chan->base.chid * 8;
 
-	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->runl << 16);
 	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
 
 	if (list_empty(&chan->head) && !chan->killed) {
 		gk104_fifo_runlist_insert(fifo, chan);
 		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
-		gk104_fifo_runlist_commit(fifo, chan->engine);
+		gk104_fifo_runlist_commit(fifo, chan->runl);
 		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
 	}
 }
@@ -201,73 +218,79 @@
 	.engine_fini = gk104_fifo_gpfifo_engine_fini,
 };
 
-int
-gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-		      void *data, u32 size, struct nvkm_object **pobject)
+struct gk104_fifo_chan_func {
+	u32 engine;
+	u64 subdev;
+};
+
+static int
+gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
+		       struct gk104_fifo *fifo, u32 *engmask, u16 *chid,
+		       u64 vm, u64 ioffset, u64 ilength,
+		       const struct nvkm_oclass *oclass,
+		       struct nvkm_object **pobject)
 {
-	union {
-		struct kepler_channel_gpfifo_a_v0 v0;
-	} *args = data;
-	struct gk104_fifo *fifo = gk104_fifo(base);
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	struct nvkm_object *parent = oclass->parent;
 	struct gk104_fifo_chan *chan;
-	u64 usermem, ioffset, ilength;
-	u32 engines;
-	int ret = -ENOSYS, i;
+	int runlist = -1, ret = -ENOSYS, i, j;
+	u32 engines = 0, present = 0;
+	u64 subdevs = 0;
+	u64 usermem;
 
-	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
-				   "ioffset %016llx ilength %08x engine %08x\n",
-			   args->v0.version, args->v0.vm, args->v0.ioffset,
-			   args->v0.ilength, args->v0.engine);
-	} else
-		return ret;
+	/* Determine which downstream engines are present */
+	for (i = 0; i < fifo->engine_nr; i++) {
+		struct nvkm_engine *engine = fifo->engine[i].engine;
+		if (engine) {
+			u64 submask = BIT_ULL(engine->subdev.index);
+			for (j = 0; func[j].subdev; j++) {
+				if (func[j].subdev & submask) {
+					present |= func[j].engine;
+					break;
+				}
+			}
 
-	/* determine which downstream engines are present */
-	for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
-		u64 subdevs = gk104_fifo_engine_subdev(i);
-		if (!nvkm_device_engine(device, __ffs64(subdevs)))
-			continue;
-		engines |= (1 << i);
+			if (!func[j].subdev)
+				continue;
+
+			if (runlist < 0 && (*engmask & present))
+				runlist = fifo->engine[i].runl;
+			if (runlist == fifo->engine[i].runl) {
+				engines |= func[j].engine;
+				subdevs |= func[j].subdev;
+			}
+		}
 	}
 
-	/* if this is an engine mask query, we're done */
-	if (!args->v0.engine) {
-		args->v0.engine = engines;
+	/* Just an engine mask query?  All done here! */
+	if (!*engmask) {
+		*engmask = present;
 		return nvkm_object_new(oclass, NULL, 0, pobject);
 	}
 
-	/* check that we support a requested engine - note that the user
-	 * argument is a mask in order to allow the user to request (for
-	 * example) *any* copy engine, but doesn't matter which.
-	 */
-	args->v0.engine &= engines;
-	if (!args->v0.engine) {
-		nvif_ioctl(parent, "no supported engine\n");
+	/* No runlist?  No supported engines. */
+	*engmask = present;
+	if (runlist < 0)
 		return -ENODEV;
-	}
+	*engmask = engines;
 
-	/* allocate the channel */
+	/* Allocate the channel. */
 	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
 		return -ENOMEM;
 	*pobject = &chan->base.object;
 	chan->fifo = fifo;
-	chan->engine = __ffs(args->v0.engine);
+	chan->runl = runlist;
 	INIT_LIST_HEAD(&chan->head);
 
 	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
-				  0x1000, 0x1000, true, args->v0.vm, 0,
-				  gk104_fifo_engine_subdev(chan->engine),
+				  0x1000, 0x1000, true, vm, 0, subdevs,
 				  1, fifo->user.bar.offset, 0x200,
 				  oclass, &chan->base);
 	if (ret)
 		return ret;
 
-	args->v0.chid = chan->base.chid;
+	*chid = chan->base.chid;
 
-	/* page directory */
+	/* Page directory. */
 	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
 	if (ret)
 		return ret;
@@ -283,10 +306,9 @@
 	if (ret)
 		return ret;
 
-	/* clear channel control registers */
+	/* Clear channel control registers. */
 	usermem = chan->base.chid * 0x200;
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
+	ilength = order_base_2(ilength / 8);
 
 	nvkm_kmap(fifo->user.mem);
 	for (i = 0; i < 0x200; i += 4)
@@ -315,6 +337,55 @@
 	return 0;
 }
 
+static const struct gk104_fifo_chan_func
+gk104_fifo_gpfifo[] = {
+	{ NVA06F_V0_ENGINE_SW | NVA06F_V0_ENGINE_GR,
+		BIT_ULL(NVKM_ENGINE_SW) | BIT_ULL(NVKM_ENGINE_GR)
+	},
+	{ NVA06F_V0_ENGINE_SEC   , BIT_ULL(NVKM_ENGINE_SEC   ) },
+	{ NVA06F_V0_ENGINE_MSVLD , BIT_ULL(NVKM_ENGINE_MSVLD ) },
+	{ NVA06F_V0_ENGINE_MSPDEC, BIT_ULL(NVKM_ENGINE_MSPDEC) },
+	{ NVA06F_V0_ENGINE_MSPPP , BIT_ULL(NVKM_ENGINE_MSPPP ) },
+	{ NVA06F_V0_ENGINE_MSENC , BIT_ULL(NVKM_ENGINE_MSENC ) },
+	{ NVA06F_V0_ENGINE_VIC   , BIT_ULL(NVKM_ENGINE_VIC   ) },
+	{ NVA06F_V0_ENGINE_NVDEC , BIT_ULL(NVKM_ENGINE_NVDEC ) },
+	{ NVA06F_V0_ENGINE_NVENC0, BIT_ULL(NVKM_ENGINE_NVENC0) },
+	{ NVA06F_V0_ENGINE_NVENC1, BIT_ULL(NVKM_ENGINE_NVENC1) },
+	{ NVA06F_V0_ENGINE_CE0   , BIT_ULL(NVKM_ENGINE_CE0   ) },
+	{ NVA06F_V0_ENGINE_CE1   , BIT_ULL(NVKM_ENGINE_CE1   ) },
+	{ NVA06F_V0_ENGINE_CE2   , BIT_ULL(NVKM_ENGINE_CE2   ) },
+	{}
+};
+
+int
+gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct kepler_channel_gpfifo_a_v0 v0;
+	} *args = data;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	int ret = -ENOSYS;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "ioffset %016llx ilength %08x engine %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.ilength, args->v0.engines);
+		return gk104_fifo_gpfifo_new_(gk104_fifo_gpfifo, fifo,
+					      &args->v0.engines,
+					      &args->v0.chid,
+					       args->v0.vm,
+					       args->v0.ioffset,
+					       args->v0.ilength,
+					      oclass, pobject);
+	}
+
+	return ret;
+}
+
 const struct nvkm_fifo_chan_oclass
 gk104_fifo_gpfifo_oclass = {
 	.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
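One subtlety in the gpfifo hunks above: an engine's context offset may now pack two instance-block locations into one u32 (low and high 16-bit halves), and both slots receive the same context pointer. A hedged sketch of the write path, with write32 standing in as a hypothetical instance-memory accessor:

  #include <linux/kernel.h>
  #include <linux/types.h>

  static void example_ctx_bind(void (*write32)(u32 off, u32 val),
  			     u32 offset, u64 addr)
  {
  	u32 datalo = lower_32_bits(addr) | 0x00000004; /* valid bit */
  	u32 datahi = upper_32_bits(addr);

  	/* Primary slot in the low 16 bits of 'offset'... */
  	write32((offset & 0xffff) + 0x00, datalo);
  	write32((offset & 0xffff) + 0x04, datahi);

  	/* ...optional second slot in the high 16 bits. */
  	if ((offset >>= 16)) {
  		write32(offset + 0x00, datalo);
  		write32(offset + 0x04, datahi);
  	}
  }

The same file also reworks the channel-creation convention: a zero engine mask in the ioctl arguments is treated as a capability query, returning the engines present rather than allocating a channel.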
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
similarity index 92%
copy from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
copy to drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
index 6511d6e..a9aa69c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2016 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,8 +26,8 @@
 #include <nvif/class.h>
 
 const struct nvkm_fifo_chan_oclass
-gm204_fifo_gpfifo_oclass = {
-	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
+gk110_fifo_gpfifo_oclass = {
+	.base.oclass = KEPLER_CHANNEL_GPFIFO_B,
 	.base.minver = 0,
 	.base.maxver = 0,
 	.ctor = gk104_fifo_gpfifo_new,
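
With channel construction fully parameterised, KEPLER_CHANNEL_GPFIFO_B becomes
a one-struct wrapper: the copied gk110 file changes nothing but the copyright
year and the class ID, reusing gk104_fifo_gpfifo_new() as its constructor, and
the gm204 -> gm200 rename below does the same for MAXWELL_CHANNEL_GPFIFO_A.
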
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
similarity index 97%
rename from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
rename to drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
index 6511d6e..a133151 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
@@ -26,7 +26,7 @@
 #include <nvif/class.h>
 
 const struct nvkm_fifo_chan_oclass
-gm204_fifo_gpfifo_oclass = {
+gm200_fifo_gpfifo_oclass = {
 	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
 	.base.minver = 0,
 	.base.maxver = 0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 9ad0d0e..290ed0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -29,8 +29,7 @@
 nvkm-y += nvkm/engine/gr/gk208.o
 nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gm107.o
-nvkm-y += nvkm/engine/gr/gm204.o
-nvkm-y += nvkm/engine/gr/gm206.o
+nvkm-y += nvkm/engine/gr/gm200.o
 nvkm-y += nvkm/engine/gr/gm20b.o
 
 nvkm-y += nvkm/engine/gr/ctxnv40.o
@@ -47,6 +46,5 @@
 nvkm-y += nvkm/engine/gr/ctxgk208.o
 nvkm-y += nvkm/engine/gr/ctxgk20a.o
 nvkm-y += nvkm/engine/gr/ctxgm107.o
-nvkm-y += nvkm/engine/gr/ctxgm204.o
-nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/ctxgm200.o
 nvkm-y += nvkm/engine/gr/ctxgm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3c64040..3c86739 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -97,12 +97,11 @@
 void gm107_grctx_generate_pagepool(struct gf100_grctx *);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
 
-extern const struct gf100_grctx_func gm204_grctx;
-void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
-void gm204_grctx_generate_tpcid(struct gf100_gr *);
-void gm204_grctx_generate_405b60(struct gf100_gr *);
+extern const struct gf100_grctx_func gm200_grctx;
+void gm200_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gm200_grctx_generate_tpcid(struct gf100_gr *);
+void gm200_grctx_generate_405b60(struct gf100_gr *);
 
-extern const struct gf100_grctx_func gm206_grctx;
 extern const struct gf100_grctx_func gm20b_grctx;
 
 /* context init value lists */
@@ -210,19 +209,4 @@
 
 extern const struct gf100_gr_init gm107_grctx_init_gpc_unk_0[];
 extern const struct gf100_gr_init gm107_grctx_init_wwdx_0[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_icmd[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_mthd[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_hub[];
-
-extern const struct gf100_gr_init gm204_grctx_init_prop_0[];
-extern const struct gf100_gr_init gm204_grctx_init_setup_0[];
-extern const struct gf100_gr_init gm204_grctx_init_gpm_0[];
-extern const struct gf100_gr_init gm204_grctx_init_gpc_unk_2[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_tpc[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_ppc[];
 #endif
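
The gm204 externs can be dropped wholesale because the new gm200
implementation below no longer carries static register packs at all:
gm200_grctx_generate_main() plays back gr->fuc_sw_ctx, gr->fuc_bundle and
gr->fuc_method, register lists that appear to come from external firmware
rather than compiled-in tables, where the deleted gm204 code iterated its own
grctx_pack_* arrays. Only the tpcid and 405b60 helpers remain worth exporting,
for gm20b to reuse.
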
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
new file mode 100644
index 0000000..e586699
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+void
+gm200_grctx_generate_tpcid(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int gpc, tpc, id;
+
+	for (tpc = 0, id = 0; tpc < 4; tpc++) {
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
+				id++;
+			}
+		}
+	}
+}
+
+static void
+gm200_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+void
+gm200_grctx_generate_405b60(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+	u32 dist[TPC_MAX / 4] = {};
+	u32 gpcs[GPC_MAX] = {};
+	u8  tpcnr[GPC_MAX];
+	int tpc, gpc, i;
+
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+
+	/* Won't result in the same distribution as the binary driver, where
+	 * some of the GPCs have more TPCs than others, but this will do for
+	 * the moment.  The code for earlier GPUs has this issue too.
+	 */
+	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+		gpcs[gpc] |= i << (tpc * 8);
+	}
+
+	for (i = 0; i < dist_nr; i++)
+		nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+	for (i = 0; i < gr->gpc_nr; i++)
+		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
+}
+
+void
+gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	u32 tmp;
+	int i;
+
+	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+	nvkm_wr32(device, 0x404154, 0x00000000);
+
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
+
+	gm200_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+	nvkm_wr32(device, 0x406500, 0x00000000);
+
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+	gm200_grctx_generate_rop_active_fbps(gr);
+
+	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+	nvkm_wr32(device, 0x4041c4, tmp);
+
+	gm200_grctx_generate_405b60(gr);
+
+	gf100_gr_icmd(gr, gr->fuc_bundle);
+	nvkm_wr32(device, 0x404154, 0x00000800);
+	gf100_gr_mthd(gr, gr->fuc_method);
+
+	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
+	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
+}
+
+const struct gf100_grctx_func
+gm200_grctx = {
+	.main  = gm200_grctx_generate_main,
+	.unkn  = gk104_grctx_generate_unkn,
+	.bundle = gm107_grctx_generate_bundle,
+	.bundle_size = 0x3000,
+	.bundle_min_gpm_fifo_depth = 0x180,
+	.bundle_token_limit = 0x780,
+	.pagepool = gm107_grctx_generate_pagepool,
+	.pagepool_size = 0x20000,
+	.attrib = gm107_grctx_generate_attrib,
+	.attrib_nr_max = 0x600,
+	.attrib_nr = 0x400,
+	.alpha_nr_max = 0x1800,
+	.alpha_nr = 0x1000,
+};
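
gm200_grctx_generate_405b60() above assigns TPC IDs round-robin across the
GPCs and packs one (gpc << 4 | tpc) byte per TPC into the 0x405b60 words, four
to a register, with a per-GPC reverse map packed the same way at 0x405ba0. A
standalone sketch of that packing, runnable outside the driver (the two-GPC
4/3 TPC topology is made up for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define GPC_NR 2
    static const uint8_t tpc_nr[GPC_NR] = { 4, 3 }; /* illustrative */

    int
    main(void)
    {
        uint8_t  tpcnr[GPC_NR];
        uint32_t dist[8] = { 0 }, gpcs[GPC_NR] = { 0 };
        int gpc, tpc, i, total = 0;

        memcpy(tpcnr, tpc_nr, sizeof(tpcnr));
        for (i = 0; i < GPC_NR; i++)
            total += tpc_nr[i];

        /* Round-robin over the GPCs, skipping any whose TPCs are all
         * assigned, exactly as the driver loop does. */
        for (gpc = -1, i = 0; i < total; i++) {
            do {
                gpc = (gpc + 1) % GPC_NR;
            } while (!tpcnr[gpc]);
            tpc = tpc_nr[gpc] - tpcnr[gpc]--;

            dist[i / 4] |= (uint32_t)((gpc << 4) | tpc) << ((i % 4) * 8);
            gpcs[gpc]   |= (uint32_t)i << (tpc * 8);
        }

        for (i = 0; i < (total + 3) / 4; i++)
            printf("0x405b60+%02x = %08x\n", i * 4, dist[i]);
        for (i = 0; i < GPC_NR; i++)
            printf("0x405ba0+%02x = %08x\n", i * 4, gpcs[i]);
        return 0;
    }

With these counts the first distribution word comes out 0x11011000: TPC slots
alternate between the two GPCs until the smaller one runs out, which matches
the comment's caveat that uneven GPCs will not reproduce the binary driver's
layout exactly.
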
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
deleted file mode 100644
index 170cbfd..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
+++ /dev/null
@@ -1,1049 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "ctxgf100.h"
-
-/*******************************************************************************
- * PGRAPH context register lists
- ******************************************************************************/
-
-static const struct gf100_gr_init
-gm204_grctx_init_icmd_0[] = {
-	{ 0x001000,   1, 0x01, 0x00000002 },
-	{ 0x0006aa,   1, 0x01, 0x00000001 },
-	{ 0x0006ad,   2, 0x01, 0x00000100 },
-	{ 0x0006b1,   1, 0x01, 0x00000011 },
-	{ 0x00078c,   1, 0x01, 0x00000008 },
-	{ 0x000792,   1, 0x01, 0x00000001 },
-	{ 0x000794,   3, 0x01, 0x00000001 },
-	{ 0x000797,   1, 0x01, 0x000000cf },
-	{ 0x00079a,   1, 0x01, 0x00000002 },
-	{ 0x0007a1,   1, 0x01, 0x00000001 },
-	{ 0x0007a3,   3, 0x01, 0x00000001 },
-	{ 0x000831,   1, 0x01, 0x00000004 },
-	{ 0x01e100,   1, 0x01, 0x00000001 },
-	{ 0x001000,   1, 0x01, 0x00000008 },
-	{ 0x000039,   3, 0x01, 0x00000000 },
-	{ 0x000380,   1, 0x01, 0x00000001 },
-	{ 0x000366,   2, 0x01, 0x00000000 },
-	{ 0x000368,   1, 0x01, 0x00000fff },
-	{ 0x000370,   2, 0x01, 0x00000000 },
-	{ 0x000372,   1, 0x01, 0x000fffff },
-	{ 0x000374,   1, 0x01, 0x00000100 },
-	{ 0x000818,   8, 0x01, 0x00000000 },
-	{ 0x000848,  16, 0x01, 0x00000000 },
-	{ 0x000738,   1, 0x01, 0x00000000 },
-	{ 0x000b07,   1, 0x01, 0x00000002 },
-	{ 0x000b08,   2, 0x01, 0x00000100 },
-	{ 0x000b0a,   1, 0x01, 0x00000001 },
-	{ 0x000a04,   1, 0x01, 0x000000ff },
-	{ 0x000a0b,   1, 0x01, 0x00000040 },
-	{ 0x00097f,   1, 0x01, 0x00000100 },
-	{ 0x000a02,   1, 0x01, 0x00000001 },
-	{ 0x000809,   1, 0x01, 0x00000007 },
-	{ 0x00c221,   1, 0x01, 0x00000040 },
-	{ 0x00c401,   1, 0x01, 0x00000001 },
-	{ 0x00c402,   1, 0x01, 0x00010001 },
-	{ 0x00c403,   2, 0x01, 0x00000001 },
-	{ 0x00c40e,   1, 0x01, 0x00000020 },
-	{ 0x01e100,   1, 0x01, 0x00000001 },
-	{ 0x001000,   1, 0x01, 0x00000001 },
-	{ 0x000b07,   1, 0x01, 0x00000002 },
-	{ 0x000b08,   2, 0x01, 0x00000100 },
-	{ 0x000b0a,   1, 0x01, 0x00000001 },
-	{ 0x01e100,   1, 0x01, 0x00000001 },
-	{ 0x001000,   1, 0x01, 0x00000004 },
-	{ 0x000039,   3, 0x01, 0x00000000 },
-	{ 0x0000a9,   1, 0x01, 0x0000ffff },
-	{ 0x000038,   1, 0x01, 0x0fac6881 },
-	{ 0x00003d,   1, 0x01, 0x00000001 },
-	{ 0x0000e8,   8, 0x01, 0x00000400 },
-	{ 0x000078,   8, 0x01, 0x00000300 },
-	{ 0x000050,   1, 0x01, 0x00000011 },
-	{ 0x000058,   8, 0x01, 0x00000008 },
-	{ 0x000208,   8, 0x01, 0x00000001 },
-	{ 0x000081,   1, 0x01, 0x00000001 },
-	{ 0x000085,   1, 0x01, 0x00000004 },
-	{ 0x000088,   1, 0x01, 0x00000400 },
-	{ 0x000090,   1, 0x01, 0x00000300 },
-	{ 0x000098,   1, 0x01, 0x00001001 },
-	{ 0x0000e3,   1, 0x01, 0x00000001 },
-	{ 0x0000da,   1, 0x01, 0x00000001 },
-	{ 0x0000b4,   4, 0x01, 0x88888888 },
-	{ 0x0000f8,   1, 0x01, 0x00000003 },
-	{ 0x0000fa,   1, 0x01, 0x00000001 },
-	{ 0x0000b1,   2, 0x01, 0x00000001 },
-	{ 0x00009f,   4, 0x01, 0x0000ffff },
-	{ 0x0000a8,   1, 0x01, 0x0000ffff },
-	{ 0x0000ad,   1, 0x01, 0x0000013e },
-	{ 0x0000e1,   1, 0x01, 0x00000010 },
-	{ 0x000290,  16, 0x01, 0x00000000 },
-	{ 0x0003b0,  16, 0x01, 0x00000000 },
-	{ 0x0002a0,  16, 0x01, 0x00000000 },
-	{ 0x000420,  16, 0x01, 0x00000000 },
-	{ 0x0002b0,  16, 0x01, 0x00000000 },
-	{ 0x000430,  16, 0x01, 0x00000000 },
-	{ 0x0002c0,  16, 0x01, 0x00000000 },
-	{ 0x0004d0,  16, 0x01, 0x00000000 },
-	{ 0x000720,  16, 0x01, 0x00000000 },
-	{ 0x0008c0,  16, 0x01, 0x00000000 },
-	{ 0x000890,  16, 0x01, 0x00000000 },
-	{ 0x0008e0,  16, 0x01, 0x00000000 },
-	{ 0x0008a0,  16, 0x01, 0x00000000 },
-	{ 0x0008f0,  16, 0x01, 0x00000000 },
-	{ 0x00094c,   1, 0x01, 0x000000ff },
-	{ 0x00094d,   1, 0x01, 0xffffffff },
-	{ 0x00094e,   1, 0x01, 0x00000002 },
-	{ 0x0002f2,   2, 0x01, 0x00000001 },
-	{ 0x0002f5,   1, 0x01, 0x00000001 },
-	{ 0x0002f7,   1, 0x01, 0x00000001 },
-	{ 0x000303,   1, 0x01, 0x00000001 },
-	{ 0x0002e6,   1, 0x01, 0x00000001 },
-	{ 0x000466,   1, 0x01, 0x00000052 },
-	{ 0x000301,   1, 0x01, 0x3f800000 },
-	{ 0x000304,   1, 0x01, 0x30201000 },
-	{ 0x000305,   1, 0x01, 0x70605040 },
-	{ 0x000306,   1, 0x01, 0xb8a89888 },
-	{ 0x000307,   1, 0x01, 0xf8e8d8c8 },
-	{ 0x00030a,   1, 0x01, 0x00ffff00 },
-	{ 0x00030b,   1, 0x01, 0x0000001a },
-	{ 0x00030c,   1, 0x01, 0x00000001 },
-	{ 0x000318,   1, 0x01, 0x00000001 },
-	{ 0x000340,   1, 0x01, 0x00000000 },
-	{ 0x00037d,   1, 0x01, 0x00000006 },
-	{ 0x0003a0,   1, 0x01, 0x00000002 },
-	{ 0x0003aa,   1, 0x01, 0x00000001 },
-	{ 0x0003a9,   1, 0x01, 0x00000001 },
-	{ 0x000380,   1, 0x01, 0x00000001 },
-	{ 0x000383,   1, 0x01, 0x00000011 },
-	{ 0x000360,   1, 0x01, 0x00000040 },
-	{ 0x000366,   2, 0x01, 0x00000000 },
-	{ 0x000368,   1, 0x01, 0x00000fff },
-	{ 0x000370,   2, 0x01, 0x00000000 },
-	{ 0x000372,   1, 0x01, 0x000fffff },
-	{ 0x000374,   1, 0x01, 0x00000100 },
-	{ 0x00037a,   1, 0x01, 0x00000012 },
-	{ 0x000619,   1, 0x01, 0x00000003 },
-	{ 0x000811,   1, 0x01, 0x00000003 },
-	{ 0x000812,   1, 0x01, 0x00000004 },
-	{ 0x000813,   1, 0x01, 0x00000006 },
-	{ 0x000814,   1, 0x01, 0x00000008 },
-	{ 0x000815,   1, 0x01, 0x0000000b },
-	{ 0x000800,   6, 0x01, 0x00000001 },
-	{ 0x000632,   1, 0x01, 0x00000001 },
-	{ 0x000633,   1, 0x01, 0x00000002 },
-	{ 0x000634,   1, 0x01, 0x00000003 },
-	{ 0x000635,   1, 0x01, 0x00000004 },
-	{ 0x000654,   1, 0x01, 0x3f800000 },
-	{ 0x000657,   1, 0x01, 0x3f800000 },
-	{ 0x000655,   2, 0x01, 0x3f800000 },
-	{ 0x0006cd,   1, 0x01, 0x3f800000 },
-	{ 0x0007f5,   1, 0x01, 0x3f800000 },
-	{ 0x0007dc,   1, 0x01, 0x39291909 },
-	{ 0x0007dd,   1, 0x01, 0x79695949 },
-	{ 0x0007de,   1, 0x01, 0xb9a99989 },
-	{ 0x0007df,   1, 0x01, 0xf9e9d9c9 },
-	{ 0x0007e8,   1, 0x01, 0x00003210 },
-	{ 0x0007e9,   1, 0x01, 0x00007654 },
-	{ 0x0007ea,   1, 0x01, 0x00000098 },
-	{ 0x0007ec,   1, 0x01, 0x39291909 },
-	{ 0x0007ed,   1, 0x01, 0x79695949 },
-	{ 0x0007ee,   1, 0x01, 0xb9a99989 },
-	{ 0x0007ef,   1, 0x01, 0xf9e9d9c9 },
-	{ 0x0007f0,   1, 0x01, 0x00003210 },
-	{ 0x0007f1,   1, 0x01, 0x00007654 },
-	{ 0x0007f2,   1, 0x01, 0x00000098 },
-	{ 0x0005a5,   1, 0x01, 0x00000001 },
-	{ 0x0005aa,   1, 0x01, 0x00000002 },
-	{ 0x0005cb,   1, 0x01, 0x00000004 },
-	{ 0x0005d0,   1, 0x01, 0x20181008 },
-	{ 0x0005d1,   1, 0x01, 0x40383028 },
-	{ 0x0005d2,   1, 0x01, 0x60585048 },
-	{ 0x0005d3,   1, 0x01, 0x80787068 },
-	{ 0x000980, 128, 0x01, 0x00000000 },
-	{ 0x000468,   1, 0x01, 0x00000004 },
-	{ 0x00046c,   1, 0x01, 0x00000001 },
-	{ 0x000470,  96, 0x01, 0x00000000 },
-	{ 0x0005e0,  16, 0x01, 0x00000d10 },
-	{ 0x000510,  16, 0x01, 0x3f800000 },
-	{ 0x000520,   1, 0x01, 0x000002b6 },
-	{ 0x000529,   1, 0x01, 0x00000001 },
-	{ 0x000530,  16, 0x01, 0xffff0000 },
-	{ 0x000550,  32, 0x01, 0xffff0000 },
-	{ 0x000585,   1, 0x01, 0x0000003f },
-	{ 0x000576,   1, 0x01, 0x00000003 },
-	{ 0x00057b,   1, 0x01, 0x00000059 },
-	{ 0x000586,   1, 0x01, 0x00000040 },
-	{ 0x000582,   2, 0x01, 0x00000080 },
-	{ 0x000595,   1, 0x01, 0x00400040 },
-	{ 0x000596,   1, 0x01, 0x00000492 },
-	{ 0x000597,   1, 0x01, 0x08080203 },
-	{ 0x0005ad,   1, 0x01, 0x00000008 },
-	{ 0x000598,   1, 0x01, 0x00020001 },
-	{ 0x0005d4,   1, 0x01, 0x00000001 },
-	{ 0x0005c2,   1, 0x01, 0x00000001 },
-	{ 0x000638,   2, 0x01, 0x00000001 },
-	{ 0x00063a,   1, 0x01, 0x00000002 },
-	{ 0x00063b,   2, 0x01, 0x00000001 },
-	{ 0x00063d,   1, 0x01, 0x00000002 },
-	{ 0x00063e,   1, 0x01, 0x00000001 },
-	{ 0x0008b8,   8, 0x01, 0x00000001 },
-	{ 0x000900,   8, 0x01, 0x00000001 },
-	{ 0x000908,   8, 0x01, 0x00000002 },
-	{ 0x000910,  16, 0x01, 0x00000001 },
-	{ 0x000920,   8, 0x01, 0x00000002 },
-	{ 0x000928,   8, 0x01, 0x00000001 },
-	{ 0x000662,   1, 0x01, 0x00000001 },
-	{ 0x000648,   9, 0x01, 0x00000001 },
-	{ 0x000674,   1, 0x01, 0x00000001 },
-	{ 0x000658,   1, 0x01, 0x0000000f },
-	{ 0x0007ff,   1, 0x01, 0x0000000a },
-	{ 0x00066a,   1, 0x01, 0x40000000 },
-	{ 0x00066b,   1, 0x01, 0x10000000 },
-	{ 0x00066c,   2, 0x01, 0xffff0000 },
-	{ 0x0007af,   2, 0x01, 0x00000008 },
-	{ 0x0007f6,   1, 0x01, 0x00000001 },
-	{ 0x0006b2,   1, 0x01, 0x00000055 },
-	{ 0x0007ad,   1, 0x01, 0x00000003 },
-	{ 0x000971,   1, 0x01, 0x00000008 },
-	{ 0x000972,   1, 0x01, 0x00000040 },
-	{ 0x000973,   1, 0x01, 0x0000012c },
-	{ 0x00097c,   1, 0x01, 0x00000040 },
-	{ 0x000975,   1, 0x01, 0x00000020 },
-	{ 0x000976,   1, 0x01, 0x00000001 },
-	{ 0x000977,   1, 0x01, 0x00000020 },
-	{ 0x000978,   1, 0x01, 0x00000001 },
-	{ 0x000957,   1, 0x01, 0x00000003 },
-	{ 0x00095e,   1, 0x01, 0x20164010 },
-	{ 0x00095f,   1, 0x01, 0x00000020 },
-	{ 0x000a0d,   1, 0x01, 0x00000006 },
-	{ 0x00097d,   1, 0x01, 0x0000000c },
-	{ 0x000683,   1, 0x01, 0x00000006 },
-	{ 0x000687,   1, 0x01, 0x003fffff },
-	{ 0x0006a0,   1, 0x01, 0x00000005 },
-	{ 0x000840,   1, 0x01, 0x00400008 },
-	{ 0x000841,   1, 0x01, 0x08000080 },
-	{ 0x000842,   1, 0x01, 0x00400008 },
-	{ 0x000843,   1, 0x01, 0x08000080 },
-	{ 0x000818,   8, 0x01, 0x00000000 },
-	{ 0x000848,  16, 0x01, 0x00000000 },
-	{ 0x000738,   1, 0x01, 0x00000000 },
-	{ 0x0006aa,   1, 0x01, 0x00000001 },
-	{ 0x0006ab,   1, 0x01, 0x00000002 },
-	{ 0x0006ac,   1, 0x01, 0x00000080 },
-	{ 0x0006ad,   2, 0x01, 0x00000100 },
-	{ 0x0006b1,   1, 0x01, 0x00000011 },
-	{ 0x0006bb,   1, 0x01, 0x000000cf },
-	{ 0x0006ce,   1, 0x01, 0x2a712488 },
-	{ 0x000739,   1, 0x01, 0x4085c000 },
-	{ 0x00073a,   1, 0x01, 0x00000080 },
-	{ 0x000786,   1, 0x01, 0x80000100 },
-	{ 0x00073c,   1, 0x01, 0x00010100 },
-	{ 0x00073d,   1, 0x01, 0x02800000 },
-	{ 0x000787,   1, 0x01, 0x000000cf },
-	{ 0x00078c,   1, 0x01, 0x00000008 },
-	{ 0x000792,   1, 0x01, 0x00000001 },
-	{ 0x000794,   3, 0x01, 0x00000001 },
-	{ 0x000797,   1, 0x01, 0x000000cf },
-	{ 0x000836,   1, 0x01, 0x00000001 },
-	{ 0x00079a,   1, 0x01, 0x00000002 },
-	{ 0x000833,   1, 0x01, 0x04444480 },
-	{ 0x0007a1,   1, 0x01, 0x00000001 },
-	{ 0x0007a3,   3, 0x01, 0x00000001 },
-	{ 0x000831,   1, 0x01, 0x00000004 },
-	{ 0x000b07,   1, 0x01, 0x00000002 },
-	{ 0x000b08,   2, 0x01, 0x00000100 },
-	{ 0x000b0a,   1, 0x01, 0x00000001 },
-	{ 0x000a04,   1, 0x01, 0x000000ff },
-	{ 0x000a0b,   1, 0x01, 0x00000040 },
-	{ 0x00097f,   1, 0x01, 0x00000100 },
-	{ 0x000a02,   1, 0x01, 0x00000001 },
-	{ 0x000809,   1, 0x01, 0x00000007 },
-	{ 0x00c221,   1, 0x01, 0x00000040 },
-	{ 0x00c1b0,   8, 0x01, 0x0000000f },
-	{ 0x00c1b8,   1, 0x01, 0x0fac6881 },
-	{ 0x00c1b9,   1, 0x01, 0x00fac688 },
-	{ 0x00c401,   1, 0x01, 0x00000001 },
-	{ 0x00c402,   1, 0x01, 0x00010001 },
-	{ 0x00c403,   2, 0x01, 0x00000001 },
-	{ 0x00c40e,   1, 0x01, 0x00000020 },
-	{ 0x00c413,   4, 0x01, 0x88888888 },
-	{ 0x00c423,   1, 0x01, 0x0000ff00 },
-	{ 0x00c420,   1, 0x01, 0x00880101 },
-	{ 0x01e100,   1, 0x01, 0x00000001 },
-	{}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_icmd[] = {
-	{ gm204_grctx_init_icmd_0 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_b197_0[] = {
-	{ 0x000800,   8, 0x40, 0x00000000 },
-	{ 0x000804,   8, 0x40, 0x00000000 },
-	{ 0x000808,   8, 0x40, 0x00000400 },
-	{ 0x00080c,   8, 0x40, 0x00000300 },
-	{ 0x000810,   1, 0x04, 0x000000cf },
-	{ 0x000850,   7, 0x40, 0x00000000 },
-	{ 0x000814,   8, 0x40, 0x00000040 },
-	{ 0x000818,   8, 0x40, 0x00000001 },
-	{ 0x00081c,   8, 0x40, 0x00000000 },
-	{ 0x000820,   8, 0x40, 0x00000000 },
-	{ 0x001c00,  16, 0x10, 0x00000000 },
-	{ 0x001c04,  16, 0x10, 0x00000000 },
-	{ 0x001c08,  16, 0x10, 0x00000000 },
-	{ 0x001c0c,  16, 0x10, 0x00000000 },
-	{ 0x001d00,  16, 0x10, 0x00000000 },
-	{ 0x001d04,  16, 0x10, 0x00000000 },
-	{ 0x001d08,  16, 0x10, 0x00000000 },
-	{ 0x001d0c,  16, 0x10, 0x00000000 },
-	{ 0x001f00,  16, 0x08, 0x00000000 },
-	{ 0x001f04,  16, 0x08, 0x00000000 },
-	{ 0x001f80,  16, 0x08, 0x00000000 },
-	{ 0x001f84,  16, 0x08, 0x00000000 },
-	{ 0x002000,   1, 0x04, 0x00000000 },
-	{ 0x002040,   1, 0x04, 0x00000011 },
-	{ 0x002080,   1, 0x04, 0x00000020 },
-	{ 0x0020c0,   1, 0x04, 0x00000030 },
-	{ 0x002100,   1, 0x04, 0x00000040 },
-	{ 0x002140,   1, 0x04, 0x00000051 },
-	{ 0x00200c,   6, 0x40, 0x00000001 },
-	{ 0x002010,   1, 0x04, 0x00000000 },
-	{ 0x002050,   1, 0x04, 0x00000000 },
-	{ 0x002090,   1, 0x04, 0x00000001 },
-	{ 0x0020d0,   1, 0x04, 0x00000002 },
-	{ 0x002110,   1, 0x04, 0x00000003 },
-	{ 0x002150,   1, 0x04, 0x00000004 },
-	{ 0x000380,   4, 0x20, 0x00000000 },
-	{ 0x000384,   4, 0x20, 0x00000000 },
-	{ 0x000388,   4, 0x20, 0x00000000 },
-	{ 0x00038c,   4, 0x20, 0x00000000 },
-	{ 0x000700,   4, 0x10, 0x00000000 },
-	{ 0x000704,   4, 0x10, 0x00000000 },
-	{ 0x000708,   4, 0x10, 0x00000000 },
-	{ 0x002800, 128, 0x04, 0x00000000 },
-	{ 0x000a00,  16, 0x20, 0x00000000 },
-	{ 0x000a04,  16, 0x20, 0x00000000 },
-	{ 0x000a08,  16, 0x20, 0x00000000 },
-	{ 0x000a0c,  16, 0x20, 0x00000000 },
-	{ 0x000a10,  16, 0x20, 0x00000000 },
-	{ 0x000a14,  16, 0x20, 0x00000000 },
-	{ 0x000a18,  16, 0x20, 0x00006420 },
-	{ 0x000a1c,  16, 0x20, 0x00000000 },
-	{ 0x000c00,  16, 0x10, 0x00000000 },
-	{ 0x000c04,  16, 0x10, 0x00000000 },
-	{ 0x000c08,  16, 0x10, 0x00000000 },
-	{ 0x000c0c,  16, 0x10, 0x3f800000 },
-	{ 0x000d00,   8, 0x08, 0xffff0000 },
-	{ 0x000d04,   8, 0x08, 0xffff0000 },
-	{ 0x000e00,  16, 0x10, 0x00000000 },
-	{ 0x000e04,  16, 0x10, 0xffff0000 },
-	{ 0x000e08,  16, 0x10, 0xffff0000 },
-	{ 0x000d40,   4, 0x08, 0x00000000 },
-	{ 0x000d44,   4, 0x08, 0x00000000 },
-	{ 0x001e00,   8, 0x20, 0x00000001 },
-	{ 0x001e04,   8, 0x20, 0x00000001 },
-	{ 0x001e08,   8, 0x20, 0x00000002 },
-	{ 0x001e0c,   8, 0x20, 0x00000001 },
-	{ 0x001e10,   8, 0x20, 0x00000001 },
-	{ 0x001e14,   8, 0x20, 0x00000002 },
-	{ 0x001e18,   8, 0x20, 0x00000001 },
-	{ 0x001480,   8, 0x10, 0x00000000 },
-	{ 0x001484,   8, 0x10, 0x00000000 },
-	{ 0x001488,   8, 0x10, 0x00000000 },
-	{ 0x003400, 128, 0x04, 0x00000000 },
-	{ 0x00030c,   1, 0x04, 0x00000001 },
-	{ 0x001944,   1, 0x04, 0x00000000 },
-	{ 0x001514,   1, 0x04, 0x00000000 },
-	{ 0x000d68,   1, 0x04, 0x0000ffff },
-	{ 0x00121c,   1, 0x04, 0x0fac6881 },
-	{ 0x000fac,   1, 0x04, 0x00000001 },
-	{ 0x001538,   1, 0x04, 0x00000001 },
-	{ 0x000fe0,   2, 0x04, 0x00000000 },
-	{ 0x000fe8,   1, 0x04, 0x00000014 },
-	{ 0x000fec,   1, 0x04, 0x00000040 },
-	{ 0x000ff0,   1, 0x04, 0x00000000 },
-	{ 0x00179c,   1, 0x04, 0x00000000 },
-	{ 0x001228,   1, 0x04, 0x00000400 },
-	{ 0x00122c,   1, 0x04, 0x00000300 },
-	{ 0x001230,   1, 0x04, 0x00010001 },
-	{ 0x0007f8,   1, 0x04, 0x00000000 },
-	{ 0x001208,   1, 0x04, 0x00000000 },
-	{ 0x0015b4,   1, 0x04, 0x00000001 },
-	{ 0x0015cc,   1, 0x04, 0x00000000 },
-	{ 0x001534,   1, 0x04, 0x00000000 },
-	{ 0x000754,   1, 0x04, 0x00000001 },
-	{ 0x000fb0,   1, 0x04, 0x00000000 },
-	{ 0x0015d0,   1, 0x04, 0x00000000 },
-	{ 0x0011e0,   4, 0x04, 0x88888888 },
-	{ 0x00153c,   1, 0x04, 0x00000000 },
-	{ 0x0016b4,   1, 0x04, 0x00000003 },
-	{ 0x000fa4,   1, 0x04, 0x00000001 },
-	{ 0x000fbc,   4, 0x04, 0x0000ffff },
-	{ 0x000fa8,   1, 0x04, 0x0000ffff },
-	{ 0x000df8,   2, 0x04, 0x00000000 },
-	{ 0x001948,   1, 0x04, 0x00000000 },
-	{ 0x001970,   1, 0x04, 0x00000001 },
-	{ 0x00161c,   1, 0x04, 0x000009f0 },
-	{ 0x000dcc,   1, 0x04, 0x00000010 },
-	{ 0x0015e4,   1, 0x04, 0x00000000 },
-	{ 0x001160,  32, 0x04, 0x25e00040 },
-	{ 0x001880,  32, 0x04, 0x00000000 },
-	{ 0x000f84,   2, 0x04, 0x00000000 },
-	{ 0x0017c8,   2, 0x04, 0x00000000 },
-	{ 0x0017d0,   1, 0x04, 0x000000ff },
-	{ 0x0017d4,   1, 0x04, 0xffffffff },
-	{ 0x0017d8,   1, 0x04, 0x00000002 },
-	{ 0x0017dc,   1, 0x04, 0x00000000 },
-	{ 0x0015f4,   2, 0x04, 0x00000000 },
-	{ 0x001434,   2, 0x04, 0x00000000 },
-	{ 0x000d74,   1, 0x04, 0x00000000 },
-	{ 0x0013a4,   1, 0x04, 0x00000000 },
-	{ 0x001318,   1, 0x04, 0x00000001 },
-	{ 0x001080,   2, 0x04, 0x00000000 },
-	{ 0x001088,   2, 0x04, 0x00000001 },
-	{ 0x001090,   1, 0x04, 0x00000000 },
-	{ 0x001094,   1, 0x04, 0x00000001 },
-	{ 0x001098,   1, 0x04, 0x00000000 },
-	{ 0x00109c,   1, 0x04, 0x00000001 },
-	{ 0x0010a0,   2, 0x04, 0x00000000 },
-	{ 0x001644,   1, 0x04, 0x00000000 },
-	{ 0x000748,   1, 0x04, 0x00000000 },
-	{ 0x000de8,   1, 0x04, 0x00000000 },
-	{ 0x001648,   1, 0x04, 0x00000000 },
-	{ 0x0012a4,   1, 0x04, 0x00000000 },
-	{ 0x001120,   4, 0x04, 0x00000000 },
-	{ 0x001118,   1, 0x04, 0x00000000 },
-	{ 0x00164c,   1, 0x04, 0x00000000 },
-	{ 0x001658,   1, 0x04, 0x00000000 },
-	{ 0x001910,   1, 0x04, 0x00000290 },
-	{ 0x001518,   1, 0x04, 0x00000000 },
-	{ 0x00165c,   1, 0x04, 0x00000001 },
-	{ 0x001520,   1, 0x04, 0x00000000 },
-	{ 0x001604,   1, 0x04, 0x00000000 },
-	{ 0x001570,   1, 0x04, 0x00000000 },
-	{ 0x0013b0,   2, 0x04, 0x3f800000 },
-	{ 0x00020c,   1, 0x04, 0x00000000 },
-	{ 0x001670,   1, 0x04, 0x30201000 },
-	{ 0x001674,   1, 0x04, 0x70605040 },
-	{ 0x001678,   1, 0x04, 0xb8a89888 },
-	{ 0x00167c,   1, 0x04, 0xf8e8d8c8 },
-	{ 0x00166c,   1, 0x04, 0x00000000 },
-	{ 0x001680,   1, 0x04, 0x00ffff00 },
-	{ 0x0012d0,   1, 0x04, 0x00000003 },
-	{ 0x00113c,   1, 0x04, 0x00000000 },
-	{ 0x0012d4,   1, 0x04, 0x00000002 },
-	{ 0x001684,   2, 0x04, 0x00000000 },
-	{ 0x000dac,   2, 0x04, 0x00001b02 },
-	{ 0x000db4,   1, 0x04, 0x00000000 },
-	{ 0x00168c,   1, 0x04, 0x00000000 },
-	{ 0x0015bc,   1, 0x04, 0x00000000 },
-	{ 0x00156c,   1, 0x04, 0x00000000 },
-	{ 0x00187c,   1, 0x04, 0x00000000 },
-	{ 0x001110,   1, 0x04, 0x00000001 },
-	{ 0x000dc0,   3, 0x04, 0x00000000 },
-	{ 0x000f40,   5, 0x04, 0x00000000 },
-	{ 0x001234,   1, 0x04, 0x00000000 },
-	{ 0x001690,   1, 0x04, 0x00000000 },
-	{ 0x000790,   5, 0x04, 0x00000000 },
-	{ 0x00077c,   1, 0x04, 0x00000000 },
-	{ 0x001000,   1, 0x04, 0x00000010 },
-	{ 0x0010fc,   1, 0x04, 0x00000000 },
-	{ 0x001290,   1, 0x04, 0x00000000 },
-	{ 0x000218,   1, 0x04, 0x00000010 },
-	{ 0x0012d8,   1, 0x04, 0x00000000 },
-	{ 0x0012dc,   1, 0x04, 0x00000010 },
-	{ 0x000d94,   1, 0x04, 0x00000001 },
-	{ 0x00155c,   2, 0x04, 0x00000000 },
-	{ 0x001564,   1, 0x04, 0x00000fff },
-	{ 0x001574,   2, 0x04, 0x00000000 },
-	{ 0x00157c,   1, 0x04, 0x000fffff },
-	{ 0x001354,   1, 0x04, 0x00000000 },
-	{ 0x001610,   1, 0x04, 0x00000012 },
-	{ 0x001608,   2, 0x04, 0x00000000 },
-	{ 0x00260c,   1, 0x04, 0x00000000 },
-	{ 0x0007ac,   1, 0x04, 0x00000000 },
-	{ 0x00162c,   1, 0x04, 0x00000003 },
-	{ 0x000210,   1, 0x04, 0x00000000 },
-	{ 0x000320,   1, 0x04, 0x00000000 },
-	{ 0x000324,   6, 0x04, 0x3f800000 },
-	{ 0x000750,   1, 0x04, 0x00000000 },
-	{ 0x000760,   1, 0x04, 0x39291909 },
-	{ 0x000764,   1, 0x04, 0x79695949 },
-	{ 0x000768,   1, 0x04, 0xb9a99989 },
-	{ 0x00076c,   1, 0x04, 0xf9e9d9c9 },
-	{ 0x000770,   1, 0x04, 0x30201000 },
-	{ 0x000774,   1, 0x04, 0x70605040 },
-	{ 0x000778,   1, 0x04, 0x00009080 },
-	{ 0x000780,   1, 0x04, 0x39291909 },
-	{ 0x000784,   1, 0x04, 0x79695949 },
-	{ 0x000788,   1, 0x04, 0xb9a99989 },
-	{ 0x00078c,   1, 0x04, 0xf9e9d9c9 },
-	{ 0x0007d0,   1, 0x04, 0x30201000 },
-	{ 0x0007d4,   1, 0x04, 0x70605040 },
-	{ 0x0007d8,   1, 0x04, 0x00009080 },
-	{ 0x001004,   1, 0x04, 0x00000000 },
-	{ 0x001240,   8, 0x04, 0x00000000 },
-	{ 0x00037c,   1, 0x04, 0x00000001 },
-	{ 0x000740,   1, 0x04, 0x00000000 },
-	{ 0x001148,   1, 0x04, 0x00000000 },
-	{ 0x000fb4,   1, 0x04, 0x00000000 },
-	{ 0x000fb8,   1, 0x04, 0x00000002 },
-	{ 0x001130,   1, 0x04, 0x00000002 },
-	{ 0x000fd4,   2, 0x04, 0x00000000 },
-	{ 0x001030,   1, 0x04, 0x20181008 },
-	{ 0x001034,   1, 0x04, 0x40383028 },
-	{ 0x001038,   1, 0x04, 0x60585048 },
-	{ 0x00103c,   1, 0x04, 0x80787068 },
-	{ 0x000744,   1, 0x04, 0x00000000 },
-	{ 0x002600,   1, 0x04, 0x00000000 },
-	{ 0x001918,   1, 0x04, 0x00000000 },
-	{ 0x00191c,   1, 0x04, 0x00000900 },
-	{ 0x001920,   1, 0x04, 0x00000405 },
-	{ 0x001308,   1, 0x04, 0x00000001 },
-	{ 0x001924,   1, 0x04, 0x00000000 },
-	{ 0x0013ac,   1, 0x04, 0x00000000 },
-	{ 0x00192c,   1, 0x04, 0x00000001 },
-	{ 0x00193c,   1, 0x04, 0x00002c1c },
-	{ 0x000d7c,   1, 0x04, 0x00000000 },
-	{ 0x000f8c,   1, 0x04, 0x00000000 },
-	{ 0x0002c0,   1, 0x04, 0x00000001 },
-	{ 0x001510,   1, 0x04, 0x00000000 },
-	{ 0x001940,   1, 0x04, 0x00000000 },
-	{ 0x000ff4,   2, 0x04, 0x00000000 },
-	{ 0x00194c,   2, 0x04, 0x00000000 },
-	{ 0x001968,   1, 0x04, 0x00000000 },
-	{ 0x001590,   1, 0x04, 0x0000003f },
-	{ 0x0007e8,   4, 0x04, 0x00000000 },
-	{ 0x00196c,   1, 0x04, 0x00000011 },
-	{ 0x0002e4,   1, 0x04, 0x0000b001 },
-	{ 0x00036c,   2, 0x04, 0x00000000 },
-	{ 0x00197c,   1, 0x04, 0x00000000 },
-	{ 0x000fcc,   2, 0x04, 0x00000000 },
-	{ 0x0002d8,   1, 0x04, 0x00000040 },
-	{ 0x001980,   1, 0x04, 0x00000080 },
-	{ 0x001504,   1, 0x04, 0x00000080 },
-	{ 0x001984,   1, 0x04, 0x00000000 },
-	{ 0x000f60,   1, 0x04, 0x00000000 },
-	{ 0x000f64,   1, 0x04, 0x00400040 },
-	{ 0x000f68,   1, 0x04, 0x00002212 },
-	{ 0x000f6c,   1, 0x04, 0x08080203 },
-	{ 0x001108,   1, 0x04, 0x00000008 },
-	{ 0x000f70,   1, 0x04, 0x00080001 },
-	{ 0x000ffc,   1, 0x04, 0x00000000 },
-	{ 0x001134,   1, 0x04, 0x00000000 },
-	{ 0x000f1c,   1, 0x04, 0x00000000 },
-	{ 0x0011f8,   1, 0x04, 0x00000000 },
-	{ 0x001138,   1, 0x04, 0x00000001 },
-	{ 0x000300,   1, 0x04, 0x00000001 },
-	{ 0x0013a8,   1, 0x04, 0x00000000 },
-	{ 0x001224,   1, 0x04, 0x00000000 },
-	{ 0x0012ec,   1, 0x04, 0x00000000 },
-	{ 0x001310,   1, 0x04, 0x00000000 },
-	{ 0x001314,   1, 0x04, 0x00000001 },
-	{ 0x001380,   1, 0x04, 0x00000000 },
-	{ 0x001384,   4, 0x04, 0x00000001 },
-	{ 0x001394,   1, 0x04, 0x00000000 },
-	{ 0x00139c,   1, 0x04, 0x00000000 },
-	{ 0x001398,   1, 0x04, 0x00000000 },
-	{ 0x001594,   1, 0x04, 0x00000000 },
-	{ 0x001598,   4, 0x04, 0x00000001 },
-	{ 0x000f54,   3, 0x04, 0x00000000 },
-	{ 0x0019bc,   1, 0x04, 0x00000000 },
-	{ 0x000f9c,   2, 0x04, 0x00000000 },
-	{ 0x0012cc,   1, 0x04, 0x00000000 },
-	{ 0x0012e8,   1, 0x04, 0x00000000 },
-	{ 0x00130c,   1, 0x04, 0x00000001 },
-	{ 0x001360,   8, 0x04, 0x00000000 },
-	{ 0x00133c,   2, 0x04, 0x00000001 },
-	{ 0x001344,   1, 0x04, 0x00000002 },
-	{ 0x001348,   2, 0x04, 0x00000001 },
-	{ 0x001350,   1, 0x04, 0x00000002 },
-	{ 0x001358,   1, 0x04, 0x00000001 },
-	{ 0x0012e4,   1, 0x04, 0x00000000 },
-	{ 0x00131c,   4, 0x04, 0x00000000 },
-	{ 0x0019c0,   1, 0x04, 0x00000000 },
-	{ 0x001140,   1, 0x04, 0x00000000 },
-	{ 0x000dd0,   1, 0x04, 0x00000000 },
-	{ 0x000dd4,   1, 0x04, 0x00000001 },
-	{ 0x0002f4,   1, 0x04, 0x00000000 },
-	{ 0x0019c4,   1, 0x04, 0x00000000 },
-	{ 0x0019c8,   1, 0x04, 0x00001500 },
-	{ 0x00135c,   1, 0x04, 0x00000000 },
-	{ 0x000f90,   1, 0x04, 0x00000000 },
-	{ 0x0019e0,   8, 0x04, 0x00000001 },
-	{ 0x0019cc,   1, 0x04, 0x00000001 },
-	{ 0x00111c,   1, 0x04, 0x00000001 },
-	{ 0x0015b8,   1, 0x04, 0x00000000 },
-	{ 0x001a00,   1, 0x04, 0x00001111 },
-	{ 0x001a04,   7, 0x04, 0x00000000 },
-	{ 0x000d6c,   2, 0x04, 0xffff0000 },
-	{ 0x0010f8,   1, 0x04, 0x00001010 },
-	{ 0x000d80,   5, 0x04, 0x00000000 },
-	{ 0x000da0,   1, 0x04, 0x00000000 },
-	{ 0x0007a4,   2, 0x04, 0x00000000 },
-	{ 0x001508,   1, 0x04, 0x80000000 },
-	{ 0x00150c,   1, 0x04, 0x40000000 },
-	{ 0x001668,   1, 0x04, 0x00000000 },
-	{ 0x000318,   2, 0x04, 0x00000008 },
-	{ 0x000d9c,   1, 0x04, 0x00000001 },
-	{ 0x000f14,   1, 0x04, 0x00000000 },
-	{ 0x000374,   1, 0x04, 0x00000000 },
-	{ 0x000378,   1, 0x04, 0x0000000c },
-	{ 0x0007dc,   1, 0x04, 0x00000000 },
-	{ 0x00074c,   1, 0x04, 0x00000055 },
-	{ 0x001420,   1, 0x04, 0x00000003 },
-	{ 0x001008,   1, 0x04, 0x00000008 },
-	{ 0x00100c,   1, 0x04, 0x00000040 },
-	{ 0x001010,   1, 0x04, 0x0000012c },
-	{ 0x000d60,   1, 0x04, 0x00000040 },
-	{ 0x001018,   1, 0x04, 0x00000020 },
-	{ 0x00101c,   1, 0x04, 0x00000001 },
-	{ 0x001020,   1, 0x04, 0x00000020 },
-	{ 0x001024,   1, 0x04, 0x00000001 },
-	{ 0x001444,   3, 0x04, 0x00000000 },
-	{ 0x000360,   1, 0x04, 0x20164010 },
-	{ 0x000364,   1, 0x04, 0x00000020 },
-	{ 0x000368,   1, 0x04, 0x00000000 },
-	{ 0x000da8,   1, 0x04, 0x00000030 },
-	{ 0x000de4,   1, 0x04, 0x00000000 },
-	{ 0x000204,   1, 0x04, 0x00000006 },
-	{ 0x0002d0,   1, 0x04, 0x003fffff },
-	{ 0x001220,   1, 0x04, 0x00000005 },
-	{ 0x000fdc,   1, 0x04, 0x00000000 },
-	{ 0x000f98,   1, 0x04, 0x00400008 },
-	{ 0x001284,   1, 0x04, 0x08000080 },
-	{ 0x001450,   1, 0x04, 0x00400008 },
-	{ 0x001454,   1, 0x04, 0x08000080 },
-	{ 0x000214,   1, 0x04, 0x00000000 },
-	{}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_mthd[] = {
-	{ gm204_grctx_init_b197_0, 0xb197 },
-	{ gf100_grctx_init_902d_0, 0x902d },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_fe_0[] = {
-	{ 0x404004,   8, 0x04, 0x00000000 },
-	{ 0x404024,   1, 0x04, 0x0000e000 },
-	{ 0x404028,   8, 0x04, 0x00000000 },
-	{ 0x4040a8,   8, 0x04, 0x00000000 },
-	{ 0x4040c8,   1, 0x04, 0xf801008f },
-	{ 0x4040d0,   6, 0x04, 0x00000000 },
-	{ 0x4040f8,   1, 0x04, 0x00000000 },
-	{ 0x404100,  10, 0x04, 0x00000000 },
-	{ 0x404130,   2, 0x04, 0x00000000 },
-	{ 0x404150,   1, 0x04, 0x0000002e },
-	{ 0x404154,   2, 0x04, 0x00000800 },
-	{ 0x404164,   1, 0x04, 0x00000045 },
-	{ 0x40417c,   2, 0x04, 0x00000000 },
-	{ 0x404194,   1, 0x04, 0x33000700 },
-	{ 0x4041a0,   4, 0x04, 0x00000000 },
-	{ 0x4041c4,   2, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_ds_0[] = {
-	{ 0x405800,   1, 0x04, 0x8f8001bf },
-	{ 0x405830,   1, 0x04, 0x04001000 },
-	{ 0x405834,   1, 0x04, 0x08000000 },
-	{ 0x405838,   1, 0x04, 0x00010000 },
-	{ 0x405854,   1, 0x04, 0x00000000 },
-	{ 0x405870,   4, 0x04, 0x00000001 },
-	{ 0x405a00,   2, 0x04, 0x00000000 },
-	{ 0x405a18,   1, 0x04, 0x00000000 },
-	{ 0x405a1c,   1, 0x04, 0x000000ff },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_cwd_0[] = {
-	{ 0x405b00,   1, 0x04, 0x00000000 },
-	{ 0x405b10,   1, 0x04, 0x00001000 },
-	{ 0x405b20,   1, 0x04, 0x04000000 },
-	{ 0x405b60,   6, 0x04, 0x00000000 },
-	{ 0x405ba0,   6, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_pd_0[] = {
-	{ 0x406020,   1, 0x04, 0x17410001 },
-	{ 0x406028,   4, 0x04, 0x00000001 },
-	{ 0x4064a8,   1, 0x04, 0x00000000 },
-	{ 0x4064ac,   1, 0x04, 0x00003fff },
-	{ 0x4064b0,   3, 0x04, 0x00000000 },
-	{ 0x4064c0,   1, 0x04, 0x80400280 },
-	{ 0x4064c4,   1, 0x04, 0x0400ffff },
-	{ 0x4064c8,   1, 0x04, 0x01800780 },
-	{ 0x4064cc,   9, 0x04, 0x00000000 },
-	{ 0x4064fc,   1, 0x04, 0x0000022a },
-	{ 0x406500,   1, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_be_0[] = {
-	{ 0x408800,   1, 0x04, 0x32882a3c },
-	{ 0x408804,   1, 0x04, 0x00000040 },
-	{ 0x408808,   1, 0x04, 0x1003e005 },
-	{ 0x408840,   1, 0x04, 0x00000e0b },
-	{ 0x408900,   1, 0x04, 0xb080b801 },
-	{ 0x408904,   1, 0x04, 0x63038001 },
-	{ 0x408908,   1, 0x04, 0x12c8502f },
-	{ 0x408980,   1, 0x04, 0x0000011d },
-	{}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_hub[] = {
-	{ gf100_grctx_init_main_0 },
-	{ gm204_grctx_init_fe_0 },
-	{ gk110_grctx_init_pri_0 },
-	{ gk104_grctx_init_memfmt_0 },
-	{ gm204_grctx_init_ds_0 },
-	{ gm204_grctx_init_cwd_0 },
-	{ gm204_grctx_init_pd_0 },
-	{ gk208_grctx_init_rstr2d_0 },
-	{ gk104_grctx_init_scc_0 },
-	{ gm204_grctx_init_be_0 },
-	{}
-};
-
-const struct gf100_gr_init
-gm204_grctx_init_prop_0[] = {
-	{ 0x418400,   1, 0x04, 0x38e01e00 },
-	{ 0x418404,   1, 0x04, 0x70001fff },
-	{ 0x41840c,   1, 0x04, 0x20001008 },
-	{ 0x418410,   2, 0x04, 0x0fff0fff },
-	{ 0x418418,   1, 0x04, 0x07ff07ff },
-	{ 0x41841c,   1, 0x04, 0x3feffbff },
-	{ 0x418450,   6, 0x04, 0x00000000 },
-	{ 0x418468,   1, 0x04, 0x00000001 },
-	{ 0x41846c,   2, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_gpc_unk_1[] = {
-	{ 0x418600,   1, 0x04, 0x0000007f },
-	{ 0x418684,   1, 0x04, 0x0000001f },
-	{ 0x418700,   1, 0x04, 0x00000002 },
-	{ 0x418704,   1, 0x04, 0x00000080 },
-	{ 0x418708,   1, 0x04, 0x40000000 },
-	{ 0x41870c,   2, 0x04, 0x00000000 },
-	{ 0x418728,   1, 0x04, 0x00010000 },
-	{}
-};
-
-const struct gf100_gr_init
-gm204_grctx_init_setup_0[] = {
-	{ 0x418800,   1, 0x04, 0x7006863a },
-	{ 0x418808,   1, 0x04, 0x00000000 },
-	{ 0x418810,   1, 0x04, 0x00000000 },
-	{ 0x418828,   1, 0x04, 0x00000044 },
-	{ 0x418830,   1, 0x04, 0x10000001 },
-	{ 0x4188d8,   1, 0x04, 0x00000008 },
-	{ 0x4188e0,   1, 0x04, 0x01000000 },
-	{ 0x4188e8,   5, 0x04, 0x00000000 },
-	{ 0x4188fc,   1, 0x04, 0x20100058 },
-	{}
-};
-
-const struct gf100_gr_init
-gm204_grctx_init_gpm_0[] = {
-	{ 0x418c10,   8, 0x04, 0x00000000 },
-	{ 0x418c40,   1, 0x04, 0xffffffff },
-	{ 0x418c6c,   1, 0x04, 0x00000001 },
-	{ 0x418c80,   1, 0x04, 0x20200000 },
-	{}
-};
-
-const struct gf100_gr_init
-gm204_grctx_init_gpc_unk_2[] = {
-	{ 0x418e00,   1, 0x04, 0x90040000 },
-	{ 0x418e24,   1, 0x04, 0x00000000 },
-	{ 0x418e28,   1, 0x04, 0x00000030 },
-	{ 0x418e2c,   1, 0x04, 0x00000100 },
-	{ 0x418e30,   3, 0x04, 0x00000000 },
-	{ 0x418e40,  22, 0x04, 0x00000000 },
-	{ 0x418ea0,  12, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_pack
-gm204_grctx_pack_gpc[] = {
-	{ gm107_grctx_init_gpc_unk_0 },
-	{ gm204_grctx_init_prop_0 },
-	{ gm204_grctx_init_gpc_unk_1 },
-	{ gm204_grctx_init_setup_0 },
-	{ gf100_grctx_init_zcull_0 },
-	{ gk208_grctx_init_crstr_0 },
-	{ gm204_grctx_init_gpm_0 },
-	{ gm204_grctx_init_gpc_unk_2 },
-	{ gf100_grctx_init_gcc_0 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_pe_0[] = {
-	{ 0x419848,   1, 0x04, 0x00000000 },
-	{ 0x419864,   1, 0x04, 0x00000029 },
-	{ 0x419888,   1, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_tex_0[] = {
-	{ 0x419a00,   1, 0x04, 0x000100f0 },
-	{ 0x419a04,   1, 0x04, 0x00000005 },
-	{ 0x419a08,   1, 0x04, 0x00000621 },
-	{ 0x419a0c,   1, 0x04, 0x00320000 },
-	{ 0x419a10,   1, 0x04, 0x00000000 },
-	{ 0x419a14,   1, 0x04, 0x00000200 },
-	{ 0x419a1c,   1, 0x04, 0x0010c000 },
-	{ 0x419a20,   1, 0x04, 0x20008a00 },
-	{ 0x419a30,   1, 0x04, 0x00000001 },
-	{ 0x419a3c,   1, 0x04, 0x0000181e },
-	{ 0x419ac4,   1, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_mpc_0[] = {
-	{ 0x419c00,   1, 0x04, 0x0000009a },
-	{ 0x419c04,   1, 0x04, 0x80000bd6 },
-	{ 0x419c08,   1, 0x04, 0x00000002 },
-	{ 0x419c20,   1, 0x04, 0x00000000 },
-	{ 0x419c24,   1, 0x04, 0x00084210 },
-	{ 0x419c28,   1, 0x04, 0x3efbefbe },
-	{ 0x419c2c,   1, 0x04, 0x00000000 },
-	{ 0x419c34,   1, 0x04, 0x71ff1ff3 },
-	{ 0x419c3c,   1, 0x04, 0x00001919 },
-	{ 0x419c50,   1, 0x04, 0x00000005 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_l1c_0[] = {
-	{ 0x419c84,   1, 0x04, 0x0000003e },
-	{ 0x419c90,   1, 0x04, 0x0000000a },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_sm_0[] = {
-	{ 0x419e04,   3, 0x04, 0x00000000 },
-	{ 0x419e10,   1, 0x04, 0x00001c02 },
-	{ 0x419e44,   1, 0x04, 0x00d3eff2 },
-	{ 0x419e48,   1, 0x04, 0x00000000 },
-	{ 0x419e4c,   1, 0x04, 0x0000007f },
-	{ 0x419e50,   1, 0x04, 0x00000000 },
-	{ 0x419e58,   6, 0x04, 0x00000000 },
-	{ 0x419e74,  10, 0x04, 0x00000000 },
-	{ 0x419eac,   1, 0x04, 0x0001cf8b },
-	{ 0x419eb0,   1, 0x04, 0x00030300 },
-	{ 0x419eb8,   1, 0x04, 0x40000000 },
-	{ 0x419ef0,  24, 0x04, 0x00000000 },
-	{ 0x419f68,   2, 0x04, 0x00000000 },
-	{ 0x419f70,   1, 0x04, 0x00000020 },
-	{ 0x419f78,   1, 0x04, 0x00010beb },
-	{ 0x419f7c,   1, 0x04, 0x00000000 },
-	{}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_tpc[] = {
-	{ gm204_grctx_init_pe_0 },
-	{ gm204_grctx_init_tex_0 },
-	{ gm204_grctx_init_mpc_0 },
-	{ gm204_grctx_init_l1c_0 },
-	{ gm204_grctx_init_sm_0 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_pes_0[] = {
-	{ 0x41be24,   1, 0x04, 0x0000000e },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_cbm_0[] = {
-	{ 0x41bec0,   1, 0x04, 0x00000000 },
-	{ 0x41bec4,   1, 0x04, 0x01030000 },
-	{ 0x41bee4,   1, 0x04, 0x00000000 },
-	{ 0x41bef0,   1, 0x04, 0x000003ff },
-	{ 0x41bef4,   2, 0x04, 0x00000000 },
-	{}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_ppc[] = {
-	{ gm204_grctx_init_pes_0 },
-	{ gm204_grctx_init_cbm_0 },
-	{ gm107_grctx_init_wwdx_0 },
-	{}
-};
-
-/*******************************************************************************
- * PGRAPH context implementation
- ******************************************************************************/
-
-void
-gm204_grctx_generate_tpcid(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	int gpc, tpc, id;
-
-	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-			if (tpc < gr->tpc_nr[gpc]) {
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
-				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
-				id++;
-			}
-		}
-	}
-}
-
-static void
-gm204_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
-	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
-}
-
-void
-gm204_grctx_generate_405b60(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
-	u32 dist[TPC_MAX / 4] = {};
-	u32 gpcs[GPC_MAX] = {};
-	u8  tpcnr[GPC_MAX];
-	int tpc, gpc, i;
-
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-
-	/* won't result in the same distribution as the binary driver where
-	 * some of the gpcs have more tpcs than others, but this shall do
-	 * for the moment.  the code for earlier gpus has this issue too.
-	 */
-	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while(!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
-		gpcs[gpc] |= i << (tpc * 8);
-	}
-
-	for (i = 0; i < dist_nr; i++)
-		nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
-	for (i = 0; i < gr->gpc_nr; i++)
-		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
-}
-
-void
-gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const struct gf100_grctx_func *grctx = gr->func->grctx;
-	u32 tmp;
-	int i;
-
-	gf100_gr_mmio(gr, grctx->hub);
-	gf100_gr_mmio(gr, grctx->gpc);
-	gf100_gr_mmio(gr, grctx->zcull);
-	gf100_gr_mmio(gr, grctx->tpc);
-	gf100_gr_mmio(gr, grctx->ppc);
-
-	nvkm_wr32(device, 0x404154, 0x00000000);
-
-	grctx->bundle(info);
-	grctx->pagepool(info);
-	grctx->attrib(info);
-	grctx->unkn(gr);
-
-	gm204_grctx_generate_tpcid(gr);
-	gf100_grctx_generate_r406028(gr);
-	gk104_grctx_generate_r418bb8(gr);
-
-	for (i = 0; i < 8; i++)
-		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
-	nvkm_wr32(device, 0x406500, 0x00000000);
-
-	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
-
-	gm204_grctx_generate_rop_active_fbps(gr);
-
-	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
-		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
-	nvkm_wr32(device, 0x4041c4, tmp);
-
-	gm204_grctx_generate_405b60(gr);
-
-	gf100_gr_icmd(gr, grctx->icmd);
-	nvkm_wr32(device, 0x404154, 0x00000800);
-	gf100_gr_mthd(gr, grctx->mthd);
-
-	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
-	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
-}
-
-const struct gf100_grctx_func
-gm204_grctx = {
-	.main  = gm204_grctx_generate_main,
-	.unkn  = gk104_grctx_generate_unkn,
-	.hub   = gm204_grctx_pack_hub,
-	.gpc   = gm204_grctx_pack_gpc,
-	.zcull = gf100_grctx_pack_zcull,
-	.tpc   = gm204_grctx_pack_tpc,
-	.ppc   = gm204_grctx_pack_ppc,
-	.icmd  = gm204_grctx_pack_icmd,
-	.mthd  = gm204_grctx_pack_mthd,
-	.bundle = gm107_grctx_generate_bundle,
-	.bundle_size = 0x3000,
-	.bundle_min_gpm_fifo_depth = 0x180,
-	.bundle_token_limit = 0x780,
-	.pagepool = gm107_grctx_generate_pagepool,
-	.pagepool_size = 0x20000,
-	.attrib = gm107_grctx_generate_attrib,
-	.attrib_nr_max = 0x600,
-	.attrib_nr = 0x400,
-	.alpha_nr_max = 0x1800,
-	.alpha_nr = 0x1000,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
deleted file mode 100644
index d6be603..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "ctxgf100.h"
-
-static const struct gf100_gr_init
-gm206_grctx_init_gpc_unk_1[] = {
-	{ 0x418600,   1, 0x04, 0x0000007f },
-	{ 0x418684,   1, 0x04, 0x0000001f },
-	{ 0x418700,   1, 0x04, 0x00000002 },
-	{ 0x418704,   1, 0x04, 0x00000080 },
-	{ 0x418708,   1, 0x04, 0x40000000 },
-	{ 0x41870c,   2, 0x04, 0x00000000 },
-	{ 0x418728,   1, 0x04, 0x00300020 },
-	{}
-};
-
-static const struct gf100_gr_pack
-gm206_grctx_pack_gpc[] = {
-	{ gm107_grctx_init_gpc_unk_0 },
-	{ gm204_grctx_init_prop_0 },
-	{ gm206_grctx_init_gpc_unk_1 },
-	{ gm204_grctx_init_setup_0 },
-	{ gf100_grctx_init_zcull_0 },
-	{ gk208_grctx_init_crstr_0 },
-	{ gm204_grctx_init_gpm_0 },
-	{ gm204_grctx_init_gpc_unk_2 },
-	{ gf100_grctx_init_gcc_0 },
-	{}
-};
-
-const struct gf100_grctx_func
-gm206_grctx = {
-	.main  = gm204_grctx_generate_main,
-	.unkn  = gk104_grctx_generate_unkn,
-	.hub   = gm204_grctx_pack_hub,
-	.gpc   = gm206_grctx_pack_gpc,
-	.zcull = gf100_grctx_pack_zcull,
-	.tpc   = gm204_grctx_pack_tpc,
-	.ppc   = gm204_grctx_pack_ppc,
-	.icmd  = gm204_grctx_pack_icmd,
-	.mthd  = gm204_grctx_pack_mthd,
-	.bundle = gm107_grctx_generate_bundle,
-	.bundle_size = 0x3000,
-	.bundle_min_gpm_fifo_depth = 0x180,
-	.bundle_token_limit = 0x780,
-	.pagepool = gm107_grctx_generate_pagepool,
-	.pagepool_size = 0x20000,
-	.attrib = gm107_grctx_generate_attrib,
-	.attrib_nr_max = 0x600,
-	.attrib_nr = 0x400,
-	.alpha_nr_max = 0x1800,
-	.alpha_nr = 0x1000,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index 6702604..a8827ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -54,7 +54,7 @@
 
 	grctx->unkn(gr);
 
-	gm204_grctx_generate_tpcid(gr);
+	gm200_grctx_generate_tpcid(gr);
 	gm20b_grctx_generate_r406028(gr);
 	gk104_grctx_generate_r418bb8(gr);
 
@@ -70,7 +70,7 @@
 		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
 	nvkm_wr32(device, 0x4041c4, tmp);
 
-	gm204_grctx_generate_405b60(gr);
+	gm200_grctx_generate_405b60(gr);
 
 	gf100_gr_wait_idle(gr);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index e168b83..dc60509 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -322,6 +322,7 @@
 
 // interrupt handler
 ih:
+	push $r0
 	push $r8
 	mov $r8 $flags
 	push $r8
@@ -358,6 +359,7 @@
 	pop $r8
 	mov $flags $r8
 	pop $r8
+	pop $r0
 	bclr $flags $p0
 	iret
 
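
The gpc.fuc fix above preserves $r0 across the interrupt handler, which the
handler body can otherwise clobber. The added push/pop pair grows the handler
by four bytes, so every symbol after ih shifts accordingly; that is why the
regenerated gpcg*.fuc3.h blobs that follow move all of their labelled offsets
(0x05b0 -> 0x05b4, 0x05fd -> 0x0601, and so on) and patch the call targets
embedded in the opcode stream along with them.
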
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 231f696..5f4ddfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -382,56 +382,57 @@
 	0xb60412fd,
 	0x1efd01e4,
 	0x0018fe05,
-	0x05b021f5,
+	0x05b421f5,
 /* 0x04eb: main_not_ctx_xfer */
 	0x94d30ef4,
 	0xf5f010ef,
 	0x7e21f501,
 	0xc60ef403,
 /* 0x04f8: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x00a7f104,
-	0x00a3f002,
-	0xc400aacf,
-	0x0bf404ab,
-	0x1cd7f02c,
-	0x1a00e7f1,
-	0xcf00e3f0,
-	0xf7f100ee,
-	0xf3f01900,
-	0x00ffcf00,
-	0xf00421f4,
-	0x07f101e7,
-	0x03f01d00,
-	0x000ed000,
-/* 0x0546: ih_no_fifo */
-	0x07f104bd,
-	0x03f00100,
-	0x000ad000,
-	0xf0fc04bd,
-	0xd0fce0fc,
-	0xa0fcb0fc,
-	0x80fc90fc,
-	0xfc0088fe,
-	0x0032f480,
-/* 0x056a: hub_barrier_done */
+	0x80f900f9,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0xf104bdf0,
+	0xf00200a7,
+	0xaacf00a3,
+	0x04abc400,
+	0xf02c0bf4,
+	0xe7f11cd7,
+	0xe3f01a00,
+	0x00eecf00,
+	0x1900f7f1,
+	0xcf00f3f0,
+	0x21f400ff,
+	0x01e7f004,
+	0x1d0007f1,
+	0xd00003f0,
+	0x04bd000e,
+/* 0x0548: ih_no_fifo */
+	0x010007f1,
+	0xd00003f0,
+	0x04bd000a,
+	0xe0fcf0fc,
+	0xb0fcd0fc,
+	0x90fca0fc,
+	0x88fe80fc,
+	0xfc80fc00,
+	0x0032f400,
+/* 0x056e: hub_barrier_done */
 	0xf7f001f8,
 	0x040e9801,
 	0xb904febb,
 	0xe7f102ff,
 	0xe3f09418,
 	0x9d21f440,
-/* 0x0582: ctx_redswitch */
+/* 0x0586: ctx_redswitch */
 	0xf7f000f8,
 	0x0007f120,
 	0x0103f085,
 	0xbd000fd0,
 	0x08e7f004,
-/* 0x0594: ctx_redswitch_delay */
+/* 0x0598: ctx_redswitch_delay */
 	0xf401e2b6,
 	0xf5f1fd1b,
 	0xf5f10800,
@@ -439,13 +440,13 @@
 	0x03f08500,
 	0x000fd001,
 	0x00f804bd,
-/* 0x05b0: ctx_xfer */
+/* 0x05b4: ctx_xfer */
 	0x810007f1,
 	0xd00203f0,
 	0x04bd000f,
 	0xf50711f4,
-/* 0x05c3: ctx_xfer_not_load */
-	0xf5058221,
+/* 0x05c7: ctx_xfer_not_load */
+	0xf5058621,
 	0xbd026a21,
 	0xfc07f124,
 	0x0203f047,
@@ -475,12 +476,11 @@
 	0x6f21f508,
 	0x5e21f501,
 	0x0601f402,
-/* 0x063b: ctx_xfer_post */
+/* 0x063f: ctx_xfer_post */
 	0xf50712f4,
-/* 0x063f: ctx_xfer_done */
+/* 0x0643: ctx_xfer_done */
 	0xf5027f21,
-	0xf8056a21,
-	0x00000000,
+	0xf8056e21,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index bb820ff..03381b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -408,56 +408,57 @@
 	0x0412fd20,
 	0xfd01e4b6,
 	0x18fe051e,
-	0xfd21f500,
-	0xd30ef405,
+	0x0121f500,
+	0xd30ef406,
 /* 0x0538: main_not_ctx_xfer */
 	0xf010ef94,
 	0x21f501f5,
 	0x0ef4037e,
 /* 0x0545: ih */
-	0xfe80f9c6,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0xa7f104bd,
-	0xa3f00200,
-	0x00aacf00,
-	0xf404abc4,
-	0xd7f02c0b,
-	0x00e7f124,
-	0x00e3f01a,
-	0xf100eecf,
-	0xf01900f7,
-	0xffcf00f3,
-	0x0421f400,
-	0xf101e7f0,
-	0xf01d0007,
-	0x0ed00003,
-/* 0x0593: ih_no_fifo */
-	0xf104bd00,
-	0xf0010007,
-	0x0ad00003,
-	0xfc04bd00,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x05b7: hub_barrier_done */
+	0xf900f9c6,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0x0200a7f1,
+	0xcf00a3f0,
+	0xabc400aa,
+	0x2c0bf404,
+	0xf124d7f0,
+	0xf01a00e7,
+	0xeecf00e3,
+	0x00f7f100,
+	0x00f3f019,
+	0xf400ffcf,
+	0xe7f00421,
+	0x0007f101,
+	0x0003f01d,
+	0xbd000ed0,
+/* 0x0595: ih_no_fifo */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0x32f400fc,
+/* 0x05bb: hub_barrier_done */
 	0xf001f800,
 	0x0e9801f7,
 	0x04febb04,
 	0xf102ffb9,
 	0xf09418e7,
 	0x21f440e3,
-/* 0x05cf: ctx_redswitch */
+/* 0x05d3: ctx_redswitch */
 	0xf000f89d,
 	0x07f120f7,
 	0x03f08500,
 	0x000fd001,
 	0xe7f004bd,
-/* 0x05e1: ctx_redswitch_delay */
+/* 0x05e5: ctx_redswitch_delay */
 	0x01e2b608,
 	0xf1fd1bf4,
 	0xf10800f5,
@@ -465,13 +466,13 @@
 	0xf0850007,
 	0x0fd00103,
 	0xf804bd00,
-/* 0x05fd: ctx_xfer */
+/* 0x0601: ctx_xfer */
 	0x0007f100,
 	0x0203f081,
 	0xbd000fd0,
 	0x0711f404,
-	0x05cf21f5,
-/* 0x0610: ctx_xfer_not_load */
+	0x05d321f5,
+/* 0x0614: ctx_xfer_not_load */
 	0x026a21f5,
 	0x07f124bd,
 	0x03f047fc,
@@ -511,10 +512,10 @@
 	0x21f5016f,
 	0x01f4025e,
 	0x0712f406,
-/* 0x06ac: ctx_xfer_post */
+/* 0x06b0: ctx_xfer_post */
 	0x027f21f5,
-/* 0x06b0: ctx_xfer_done */
-	0x05b721f5,
+/* 0x06b4: ctx_xfer_done */
+	0x05bb21f5,
 	0x000000f8,
 	0x00000000,
 	0x00000000,
@@ -533,5 +534,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 911976d..99d9b48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -408,56 +408,57 @@
 	0x0412fd20,
 	0xfd01e4b6,
 	0x18fe051e,
-	0xfd21f500,
-	0xd30ef405,
+	0x0121f500,
+	0xd30ef406,
 /* 0x0538: main_not_ctx_xfer */
 	0xf010ef94,
 	0x21f501f5,
 	0x0ef4037e,
 /* 0x0545: ih */
-	0xfe80f9c6,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0xa7f104bd,
-	0xa3f00200,
-	0x00aacf00,
-	0xf404abc4,
-	0xd7f02c0b,
-	0x00e7f124,
-	0x00e3f01a,
-	0xf100eecf,
-	0xf01900f7,
-	0xffcf00f3,
-	0x0421f400,
-	0xf101e7f0,
-	0xf01d0007,
-	0x0ed00003,
-/* 0x0593: ih_no_fifo */
-	0xf104bd00,
-	0xf0010007,
-	0x0ad00003,
-	0xfc04bd00,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x05b7: hub_barrier_done */
+	0xf900f9c6,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0x0200a7f1,
+	0xcf00a3f0,
+	0xabc400aa,
+	0x2c0bf404,
+	0xf124d7f0,
+	0xf01a00e7,
+	0xeecf00e3,
+	0x00f7f100,
+	0x00f3f019,
+	0xf400ffcf,
+	0xe7f00421,
+	0x0007f101,
+	0x0003f01d,
+	0xbd000ed0,
+/* 0x0595: ih_no_fifo */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0x32f400fc,
+/* 0x05bb: hub_barrier_done */
 	0xf001f800,
 	0x0e9801f7,
 	0x04febb04,
 	0xf102ffb9,
 	0xf09418e7,
 	0x21f440e3,
-/* 0x05cf: ctx_redswitch */
+/* 0x05d3: ctx_redswitch */
 	0xf000f89d,
 	0x07f120f7,
 	0x03f08500,
 	0x000fd001,
 	0xe7f004bd,
-/* 0x05e1: ctx_redswitch_delay */
+/* 0x05e5: ctx_redswitch_delay */
 	0x01e2b608,
 	0xf1fd1bf4,
 	0xf10800f5,
@@ -465,13 +466,13 @@
 	0xf0850007,
 	0x0fd00103,
 	0xf804bd00,
-/* 0x05fd: ctx_xfer */
+/* 0x0601: ctx_xfer */
 	0x0007f100,
 	0x0203f081,
 	0xbd000fd0,
 	0x0711f404,
-	0x05cf21f5,
-/* 0x0610: ctx_xfer_not_load */
+	0x05d321f5,
+/* 0x0614: ctx_xfer_not_load */
 	0x026a21f5,
 	0x07f124bd,
 	0x03f047fc,
@@ -511,10 +512,10 @@
 	0x21f5016f,
 	0x01f4025e,
 	0x0712f406,
-/* 0x06ac: ctx_xfer_post */
+/* 0x06b0: ctx_xfer_post */
 	0x027f21f5,
-/* 0x06b0: ctx_xfer_done */
-	0x05b721f5,
+/* 0x06b4: ctx_xfer_done */
+	0x05bb21f5,
 	0x000000f8,
 	0x00000000,
 	0x00000000,
@@ -533,5 +534,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 1c6e11b..f726769 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -408,56 +408,57 @@
 	0x0412fd20,
 	0xfd01e4b6,
 	0x18fe051e,
-	0xfd21f500,
-	0xd30ef405,
+	0x0121f500,
+	0xd30ef406,
 /* 0x0538: main_not_ctx_xfer */
 	0xf010ef94,
 	0x21f501f5,
 	0x0ef4037e,
 /* 0x0545: ih */
-	0xfe80f9c6,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0xa7f104bd,
-	0xa3f00200,
-	0x00aacf00,
-	0xf404abc4,
-	0xd7f02c0b,
-	0x00e7f124,
-	0x00e3f01a,
-	0xf100eecf,
-	0xf01900f7,
-	0xffcf00f3,
-	0x0421f400,
-	0xf101e7f0,
-	0xf01d0007,
-	0x0ed00003,
-/* 0x0593: ih_no_fifo */
-	0xf104bd00,
-	0xf0010007,
-	0x0ad00003,
-	0xfc04bd00,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x05b7: hub_barrier_done */
+	0xf900f9c6,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0x0200a7f1,
+	0xcf00a3f0,
+	0xabc400aa,
+	0x2c0bf404,
+	0xf124d7f0,
+	0xf01a00e7,
+	0xeecf00e3,
+	0x00f7f100,
+	0x00f3f019,
+	0xf400ffcf,
+	0xe7f00421,
+	0x0007f101,
+	0x0003f01d,
+	0xbd000ed0,
+/* 0x0595: ih_no_fifo */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0x32f400fc,
+/* 0x05bb: hub_barrier_done */
 	0xf001f800,
 	0x0e9801f7,
 	0x04febb04,
 	0xf102ffb9,
 	0xf09418e7,
 	0x21f440e3,
-/* 0x05cf: ctx_redswitch */
+/* 0x05d3: ctx_redswitch */
 	0xf000f89d,
 	0x07f120f7,
 	0x03f08500,
 	0x000fd001,
 	0xe7f004bd,
-/* 0x05e1: ctx_redswitch_delay */
+/* 0x05e5: ctx_redswitch_delay */
 	0x01e2b608,
 	0xf1fd1bf4,
 	0xf10800f5,
@@ -465,13 +466,13 @@
 	0xf0850007,
 	0x0fd00103,
 	0xf804bd00,
-/* 0x05fd: ctx_xfer */
+/* 0x0601: ctx_xfer */
 	0x0007f100,
 	0x0203f081,
 	0xbd000fd0,
 	0x0711f404,
-	0x05cf21f5,
-/* 0x0610: ctx_xfer_not_load */
+	0x05d321f5,
+/* 0x0614: ctx_xfer_not_load */
 	0x026a21f5,
 	0x07f124bd,
 	0x03f047fc,
@@ -511,10 +512,10 @@
 	0x21f5016f,
 	0x01f4025e,
 	0x0712f406,
-/* 0x06ac: ctx_xfer_post */
+/* 0x06b0: ctx_xfer_post */
 	0x027f21f5,
-/* 0x06b0: ctx_xfer_done */
-	0x05b721f5,
+/* 0x06b4: ctx_xfer_done */
+	0x05bb21f5,
 	0x000000f8,
 	0x00000000,
 	0x00000000,
@@ -533,5 +534,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 84af7ec..387d1fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -360,61 +360,62 @@
 	0xb60412fd,
 	0x1efd01e4,
 	0x0018fe05,
-	0x00051b7e,
+	0x00051f7e,
 /* 0x0477: main_not_ctx_xfer */
 	0x94d40ef4,
 	0xf5f010ef,
 	0x02f87e01,
 	0xc70ef400,
 /* 0x0484: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x02004a04,
-	0xc400aacf,
-	0x0bf404ab,
-	0x4e240d1f,
-	0xeecf1a00,
-	0x19004f00,
-	0x7e00ffcf,
-	0x0e000004,
-	0x1d004001,
-	0xbd000ef6,
-/* 0x04c1: ih_no_fifo */
-	0x01004004,
-	0xbd000af6,
-	0xfcf0fc04,
-	0xfcd0fce0,
-	0xfca0fcb0,
-	0xfe80fc90,
-	0x80fc0088,
+	0x80f900f9,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0x4a04bdf0,
+	0xaacf0200,
+	0x04abc400,
+	0x0d1f0bf4,
+	0x1a004e24,
+	0x4f00eecf,
+	0xffcf1900,
+	0x00047e00,
+	0x40010e00,
+	0x0ef61d00,
+/* 0x04c3: ih_no_fifo */
+	0x4004bd00,
+	0x0af60100,
+	0xfc04bd00,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x00fc80fc,
 	0xf80032f4,
-/* 0x04e1: hub_barrier_done */
+/* 0x04e5: hub_barrier_done */
 	0x98010f01,
 	0xfebb040e,
 	0x8effb204,
 	0x7e409418,
 	0xf800008f,
-/* 0x04f5: ctx_redswitch */
+/* 0x04f9: ctx_redswitch */
 	0x80200f00,
 	0xf6018500,
 	0x04bd000f,
-/* 0x0502: ctx_redswitch_delay */
+/* 0x0506: ctx_redswitch_delay */
 	0xe2b6080e,
 	0xfd1bf401,
 	0x0800f5f1,
 	0x0200f5f1,
 	0x01850080,
 	0xbd000ff6,
-/* 0x051b: ctx_xfer */
+/* 0x051f: ctx_xfer */
 	0x8000f804,
 	0xf6028100,
 	0x04bd000f,
 	0x7e0711f4,
-/* 0x052b: ctx_xfer_not_load */
-	0x7e0004f5,
+/* 0x052f: ctx_xfer_not_load */
+	0x7e0004f9,
 	0xbd000216,
 	0x47fc8024,
 	0x0002f602,
@@ -449,10 +450,10 @@
 	0x7e00013d,
 	0xf400020a,
 	0x12f40601,
-/* 0x05b5: ctx_xfer_post */
+/* 0x05b9: ctx_xfer_post */
 	0x02277e07,
-/* 0x05b9: ctx_xfer_done */
-	0x04e17e00,
+/* 0x05bd: ctx_xfer_done */
+	0x04e57e00,
 	0x0000f800,
 	0x00000000,
 	0x00000000,
@@ -469,5 +470,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 5136f91..fa9f3c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -438,48 +438,49 @@
 	0x0412fd20,
 	0xfd01e4b6,
 	0x18fe051e,
-	0x06447e00,
+	0x06487e00,
 	0xd40ef400,
 /* 0x05a0: main_not_ctx_xfer */
 	0xf010ef94,
 	0xf87e01f5,
 	0x0ef40002,
 /* 0x05ad: ih */
-	0xfe80f9c7,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0x004a04bd,
-	0x00aacf02,
-	0xf404abc4,
-	0x240d1f0b,
-	0xcf1a004e,
-	0x004f00ee,
-	0x00ffcf19,
-	0x0000047e,
-	0x0040010e,
-	0x000ef61d,
-/* 0x05ea: ih_no_fifo */
-	0x004004bd,
-	0x000af601,
-	0xf0fc04bd,
-	0xd0fce0fc,
-	0xa0fcb0fc,
-	0x80fc90fc,
-	0xfc0088fe,
-	0x0032f480,
-/* 0x060a: hub_barrier_done */
+	0xf900f9c7,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0xcf02004a,
+	0xabc400aa,
+	0x1f0bf404,
+	0x004e240d,
+	0x00eecf1a,
+	0xcf19004f,
+	0x047e00ff,
+	0x010e0000,
+	0xf61d0040,
+	0x04bd000e,
+/* 0x05ec: ih_no_fifo */
+	0xf6010040,
+	0x04bd000a,
+	0xe0fcf0fc,
+	0xb0fcd0fc,
+	0x90fca0fc,
+	0x88fe80fc,
+	0xfc80fc00,
+	0x0032f400,
+/* 0x060e: hub_barrier_done */
 	0x010f01f8,
 	0xbb040e98,
 	0xffb204fe,
 	0x4094188e,
 	0x00008f7e,
-/* 0x061e: ctx_redswitch */
+/* 0x0622: ctx_redswitch */
 	0x200f00f8,
 	0x01850080,
 	0xbd000ff6,
-/* 0x062b: ctx_redswitch_delay */
+/* 0x062f: ctx_redswitch_delay */
 	0xb6080e04,
 	0x1bf401e2,
 	0x00f5f1fd,
@@ -487,15 +488,15 @@
 	0x85008002,
 	0x000ff601,
 	0x00f804bd,
-/* 0x0644: ctx_xfer */
+/* 0x0648: ctx_xfer */
 	0x02810080,
 	0xbd000ff6,
 	0x1dc48e04,
 	0x01e5f050,
 	0x8f7effb2,
 	0x11f40000,
-	0x061e7e07,
-/* 0x0661: ctx_xfer_not_load */
+	0x06227e07,
+/* 0x0665: ctx_xfer_not_load */
 	0x02167e00,
 	0x8024bd00,
 	0xf60247fc,
@@ -550,15 +551,15 @@
 	0x7e00020a,
 	0xf4000314,
 	0x12f40601,
-/* 0x0739: ctx_xfer_post */
+/* 0x073d: ctx_xfer_post */
 	0x02277e1a,
 	0x8e0d0f00,
 	0xf0501da8,
 	0xffb201e5,
 	0x00008f7e,
 	0x0003147e,
-/* 0x0750: ctx_xfer_done */
-	0x00060a7e,
+/* 0x0754: ctx_xfer_done */
+	0x00060e7e,
 	0x000000f8,
 	0x00000000,
 	0x00000000,
@@ -601,5 +602,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
index 87f99e3..e3a2fb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
@@ -306,6 +306,7 @@
 
 // interrupt handler
 ih:
+	push $r0
 	push $r8
 	mov $r8 $flags
 	push $r8
@@ -380,6 +381,7 @@
 	pop $r8
 	mov $flags $r8
 	pop $r8
+	pop $r0
 	bclr $flags $p0
 	iret
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index f6acda5..397921a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -528,10 +528,10 @@
 	0x0001d001,
 	0x17f104bd,
 	0xf7f00100,
-	0x0d21f502,
-	0x1f21f508,
+	0x1121f502,
+	0x2321f508,
 	0x10f7f008,
-	0x086c21f5,
+	0x087021f5,
 	0x98000e98,
 	0x21f5010f,
 	0x14950150,
@@ -574,9 +574,9 @@
 	0xb6800040,
 	0x1bf40132,
 	0x00f7f0be,
-	0x086c21f5,
+	0x087021f5,
 	0xf500f7f0,
-	0xf1080d21,
+	0xf1081121,
 	0xf0010007,
 	0x01d00203,
 	0xbd04bd00,
@@ -610,7 +610,7 @@
 	0x09d00203,
 	0xf404bd00,
 	0x31f40132,
-	0x4021f502,
+	0x4421f502,
 	0xf094bd0a,
 	0x07f10799,
 	0x03f01700,
@@ -621,7 +621,7 @@
 	0x0203f00f,
 	0xbd0009d0,
 	0x0131f404,
-	0x0a4021f5,
+	0x0a4421f5,
 	0x99f094bd,
 	0x0007f106,
 	0x0203f017,
@@ -631,7 +631,7 @@
 	0x12b920f9,
 	0x0132f402,
 	0xf50232f4,
-	0xfc0a4021,
+	0xfc0a4421,
 	0x0007f120,
 	0x0203f0c0,
 	0xbd0002d0,
@@ -640,7 +640,7 @@
 	0xf41f23c8,
 	0x31f40d0b,
 	0x0232f401,
-	0x0a4021f5,
+	0x0a4421f5,
 /* 0x063c: chsw_done */
 	0xf10127f0,
 	0xf0c30007,
@@ -654,7 +654,7 @@
 /* 0x0660: main_not_ctx_switch */
 	0xf401e4b0,
 	0xf2b90d1b,
-	0xd021f502,
+	0xd421f502,
 	0x460ef409,
 /* 0x0670: main_not_ctx_chan */
 	0xf402e4b0,
@@ -664,7 +664,7 @@
 	0x09d00203,
 	0xf404bd00,
 	0x32f40132,
-	0x4021f502,
+	0x4421f502,
 	0xf094bd0a,
 	0x07f10799,
 	0x03f01700,
@@ -682,107 +682,108 @@
 	0x04bd0002,
 	0xfea00ef5,
 /* 0x06c8: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x00a7f104,
-	0x00a3f002,
-	0xc400aacf,
-	0x0bf404ab,
-	0x10d7f030,
-	0x1a00e7f1,
-	0xcf00e3f0,
-	0xf7f100ee,
-	0xf3f01900,
-	0x00ffcf00,
-	0xb70421f4,
-	0xf00400b0,
-	0x07f101e7,
-	0x03f01d00,
-	0x000ed000,
-/* 0x071a: ih_no_fifo */
-	0xabe404bd,
-	0x0bf40100,
-	0x10d7f00d,
-	0x4001e7f1,
-/* 0x072b: ih_no_ctxsw */
-	0xe40421f4,
-	0xf40400ab,
-	0xe7f16c0b,
-	0xe3f00708,
-	0x6821f440,
-	0xf102ffb9,
-	0xf0040007,
-	0x0fd00203,
-	0xf104bd00,
-	0xf00704e7,
+	0x80f900f9,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0xf104bdf0,
+	0xf00200a7,
+	0xaacf00a3,
+	0x04abc400,
+	0xf0300bf4,
+	0xe7f110d7,
+	0xe3f01a00,
+	0x00eecf00,
+	0x1900f7f1,
+	0xcf00f3f0,
+	0x21f400ff,
+	0x00b0b704,
+	0x01e7f004,
+	0x1d0007f1,
+	0xd00003f0,
+	0x04bd000e,
+/* 0x071c: ih_no_fifo */
+	0x0100abe4,
+	0xf00d0bf4,
+	0xe7f110d7,
+	0x21f44001,
+/* 0x072d: ih_no_ctxsw */
+	0x00abe404,
+	0x6c0bf404,
+	0x0708e7f1,
+	0xf440e3f0,
+	0xffb96821,
+	0x0007f102,
+	0x0203f004,
+	0xbd000fd0,
+	0x04e7f104,
+	0x40e3f007,
+	0xb96821f4,
+	0x07f102ff,
+	0x03f00300,
+	0x000fd002,
+	0xfec704bd,
+	0x02ee9450,
+	0x0700f7f1,
+	0xbb40f3f0,
+	0x21f400ef,
+	0x0007f168,
+	0x0203f002,
+	0xbd000fd0,
+	0x03f7f004,
+	0x037e21f5,
+	0x0100b7f1,
+	0xf102bfb9,
+	0xf00144e7,
 	0x21f440e3,
-	0x02ffb968,
-	0x030007f1,
-	0xd00203f0,
-	0x04bd000f,
-	0x9450fec7,
-	0xf7f102ee,
-	0xf3f00700,
-	0x00efbb40,
-	0xf16821f4,
-	0xf0020007,
-	0x0fd00203,
-	0xf004bd00,
-	0x21f503f7,
-	0xb7f1037e,
-	0xbfb90100,
-	0x44e7f102,
-	0x40e3f001,
-/* 0x079b: ih_no_fwmthd */
-	0xf19d21f4,
-	0xbd0504b7,
-	0xb4abffb0,
-	0xf10f0bf4,
-	0xf0070007,
-	0x0bd00303,
-/* 0x07b3: ih_no_other */
-	0xf104bd00,
-	0xf0010007,
-	0x0ad00003,
-	0xfc04bd00,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x07d7: ctx_4160s */
+/* 0x079d: ih_no_fwmthd */
+	0x04b7f19d,
+	0xffb0bd05,
+	0x0bf4b4ab,
+	0x0007f10f,
+	0x0303f007,
+	0xbd000bd0,
+/* 0x07b5: ih_no_other */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0x32f400fc,
+/* 0x07db: ctx_4160s */
 	0xf001f800,
 	0xffb901f7,
 	0x60e7f102,
 	0x40e3f041,
-/* 0x07e7: ctx_4160s_wait */
+/* 0x07eb: ctx_4160s_wait */
 	0xf19d21f4,
 	0xf04160e7,
 	0x21f440e3,
 	0x02ffb968,
 	0xf404ffc8,
 	0x00f8f00b,
-/* 0x07fc: ctx_4160c */
+/* 0x0800: ctx_4160c */
 	0xffb9f4bd,
 	0x60e7f102,
 	0x40e3f041,
 	0xf89d21f4,
-/* 0x080d: ctx_4170s */
+/* 0x0811: ctx_4170s */
 	0x10f5f000,
 	0xf102ffb9,
 	0xf04170e7,
 	0x21f440e3,
-/* 0x081f: ctx_4170w */
+/* 0x0823: ctx_4170w */
 	0xf100f89d,
 	0xf04170e7,
 	0x21f440e3,
 	0x02ffb968,
 	0xf410f4f0,
 	0x00f8f01b,
-/* 0x0834: ctx_redswitch */
+/* 0x0838: ctx_redswitch */
 	0x0200e7f1,
 	0xf040e5f0,
 	0xe5f020e5,
@@ -790,7 +791,7 @@
 	0x0103f085,
 	0xbd000ed0,
 	0x08f7f004,
-/* 0x0850: ctx_redswitch_delay */
+/* 0x0854: ctx_redswitch_delay */
 	0xf401f2b6,
 	0xe5f1fd1b,
 	0xe5f10400,
@@ -798,7 +799,7 @@
 	0x03f08500,
 	0x000ed001,
 	0x00f804bd,
-/* 0x086c: ctx_86c */
+/* 0x0870: ctx_86c */
 	0x1b0007f1,
 	0xd00203f0,
 	0x04bd000f,
@@ -809,16 +810,16 @@
 	0xa86ce7f1,
 	0xf441e3f0,
 	0x00f89d21,
-/* 0x0894: ctx_mem */
+/* 0x0898: ctx_mem */
 	0x840007f1,
 	0xd00203f0,
 	0x04bd000f,
-/* 0x08a0: ctx_mem_wait */
+/* 0x08a4: ctx_mem_wait */
 	0x8400f7f1,
 	0xcf02f3f0,
 	0xfffd00ff,
 	0xf31bf405,
-/* 0x08b2: ctx_load */
+/* 0x08b6: ctx_load */
 	0x94bd00f8,
 	0xf10599f0,
 	0xf00f0007,
@@ -836,7 +837,7 @@
 	0x02d00203,
 	0xf004bd00,
 	0x21f507f7,
-	0x07f10894,
+	0x07f10898,
 	0x03f0c000,
 	0x0002d002,
 	0x0bfe04bd,
@@ -891,31 +892,31 @@
 	0x03f01700,
 	0x0009d002,
 	0x00f804bd,
-/* 0x09d0: ctx_chan */
-	0x07d721f5,
-	0x08b221f5,
+/* 0x09d4: ctx_chan */
+	0x07db21f5,
+	0x08b621f5,
 	0xf40ca7f0,
 	0xf7f0d021,
-	0x9421f505,
-	0xfc21f508,
-/* 0x09eb: ctx_mmio_exec */
-	0x9800f807,
+	0x9821f505,
+	0x0021f508,
+/* 0x09ef: ctx_mmio_exec */
+	0x9800f808,
 	0x07f14103,
 	0x03f08100,
 	0x0003d002,
 	0x34bd04bd,
-/* 0x09fc: ctx_mmio_loop */
+/* 0x0a00: ctx_mmio_loop */
 	0xf4ff34c4,
 	0x57f10f1b,
 	0x53f00200,
 	0x0535fa06,
-/* 0x0a0e: ctx_mmio_pull */
+/* 0x0a12: ctx_mmio_pull */
 	0x4e9803f8,
 	0x814f9880,
 	0xb69d21f4,
 	0x12b60830,
 	0xdf1bf401,
-/* 0x0a20: ctx_mmio_done */
+/* 0x0a24: ctx_mmio_done */
 	0xf1160398,
 	0xf0810007,
 	0x03d00203,
@@ -924,30 +925,30 @@
 	0x13f00100,
 	0x0601fa06,
 	0x00f803f8,
-/* 0x0a40: ctx_xfer */
+/* 0x0a44: ctx_xfer */
 	0xf104e7f0,
 	0xf0020007,
 	0x0ed00303,
-/* 0x0a4f: ctx_xfer_idle */
+/* 0x0a53: ctx_xfer_idle */
 	0xf104bd00,
 	0xf00000e7,
 	0xeecf03e3,
 	0x00e4f100,
 	0xf21bf420,
 	0xf40611f4,
-/* 0x0a66: ctx_xfer_pre */
+/* 0x0a6a: ctx_xfer_pre */
 	0xf7f01102,
-	0x6c21f510,
-	0xd721f508,
+	0x7021f510,
+	0xdb21f508,
 	0x1c11f407,
-/* 0x0a74: ctx_xfer_pre_load */
+/* 0x0a78: ctx_xfer_pre_load */
 	0xf502f7f0,
-	0xf5080d21,
-	0xf5081f21,
-	0xbd083421,
-	0x0d21f5f4,
-	0xb221f508,
-/* 0x0a8d: ctx_xfer_exec */
+	0xf5081121,
+	0xf5082321,
+	0xbd083821,
+	0x1121f5f4,
+	0xb621f508,
+/* 0x0a91: ctx_xfer_exec */
 	0x16019808,
 	0x07f124bd,
 	0x03f00500,
@@ -982,24 +983,23 @@
 	0x1301f402,
 	0xf40ca7f0,
 	0xf7f0d021,
-	0x9421f505,
+	0x9821f505,
 	0x3202f408,
-/* 0x0b1c: ctx_xfer_post */
+/* 0x0b20: ctx_xfer_post */
 	0xf502f7f0,
-	0xbd080d21,
-	0x6c21f5f4,
+	0xbd081121,
+	0x7021f5f4,
 	0x7f21f508,
-	0x1f21f502,
+	0x2321f502,
 	0xf5f4bd08,
-	0xf4080d21,
+	0xf4081121,
 	0x01981011,
 	0x0511fd40,
 	0xf5070bf4,
-/* 0x0b47: ctx_xfer_no_post_mmio */
-	0xf509eb21,
-/* 0x0b4b: ctx_xfer_done */
-	0xf807fc21,
-	0x00000000,
+/* 0x0b4b: ctx_xfer_no_post_mmio */
+	0xf509ef21,
+/* 0x0b4f: ctx_xfer_done */
+	0xf8080021,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 7cb14e5..50c9716 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -528,10 +528,10 @@
 	0x0001d001,
 	0x17f104bd,
 	0xf7f00100,
-	0x0d21f502,
-	0x1f21f508,
+	0x1121f502,
+	0x2321f508,
 	0x10f7f008,
-	0x086c21f5,
+	0x087021f5,
 	0x98000e98,
 	0x21f5010f,
 	0x14950150,
@@ -574,9 +574,9 @@
 	0xb6800040,
 	0x1bf40132,
 	0x00f7f0be,
-	0x086c21f5,
+	0x087021f5,
 	0xf500f7f0,
-	0xf1080d21,
+	0xf1081121,
 	0xf0010007,
 	0x01d00203,
 	0xbd04bd00,
@@ -610,7 +610,7 @@
 	0x09d00203,
 	0xf404bd00,
 	0x31f40132,
-	0x4021f502,
+	0x4421f502,
 	0xf094bd0a,
 	0x07f10799,
 	0x03f01700,
@@ -621,7 +621,7 @@
 	0x0203f00f,
 	0xbd0009d0,
 	0x0131f404,
-	0x0a4021f5,
+	0x0a4421f5,
 	0x99f094bd,
 	0x0007f106,
 	0x0203f017,
@@ -631,7 +631,7 @@
 	0x12b920f9,
 	0x0132f402,
 	0xf50232f4,
-	0xfc0a4021,
+	0xfc0a4421,
 	0x0007f120,
 	0x0203f0c0,
 	0xbd0002d0,
@@ -640,7 +640,7 @@
 	0xf41f23c8,
 	0x31f40d0b,
 	0x0232f401,
-	0x0a4021f5,
+	0x0a4421f5,
 /* 0x063c: chsw_done */
 	0xf10127f0,
 	0xf0c30007,
@@ -654,7 +654,7 @@
 /* 0x0660: main_not_ctx_switch */
 	0xf401e4b0,
 	0xf2b90d1b,
-	0xd021f502,
+	0xd421f502,
 	0x460ef409,
 /* 0x0670: main_not_ctx_chan */
 	0xf402e4b0,
@@ -664,7 +664,7 @@
 	0x09d00203,
 	0xf404bd00,
 	0x32f40132,
-	0x4021f502,
+	0x4421f502,
 	0xf094bd0a,
 	0x07f10799,
 	0x03f01700,
@@ -682,107 +682,108 @@
 	0x04bd0002,
 	0xfea00ef5,
 /* 0x06c8: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x00a7f104,
-	0x00a3f002,
-	0xc400aacf,
-	0x0bf404ab,
-	0x10d7f030,
-	0x1a00e7f1,
-	0xcf00e3f0,
-	0xf7f100ee,
-	0xf3f01900,
-	0x00ffcf00,
-	0xb70421f4,
-	0xf00400b0,
-	0x07f101e7,
-	0x03f01d00,
-	0x000ed000,
-/* 0x071a: ih_no_fifo */
-	0xabe404bd,
-	0x0bf40100,
-	0x10d7f00d,
-	0x4001e7f1,
-/* 0x072b: ih_no_ctxsw */
-	0xe40421f4,
-	0xf40400ab,
-	0xe7f16c0b,
-	0xe3f00708,
-	0x6821f440,
-	0xf102ffb9,
-	0xf0040007,
-	0x0fd00203,
-	0xf104bd00,
-	0xf00704e7,
+	0x80f900f9,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0xf104bdf0,
+	0xf00200a7,
+	0xaacf00a3,
+	0x04abc400,
+	0xf0300bf4,
+	0xe7f110d7,
+	0xe3f01a00,
+	0x00eecf00,
+	0x1900f7f1,
+	0xcf00f3f0,
+	0x21f400ff,
+	0x00b0b704,
+	0x01e7f004,
+	0x1d0007f1,
+	0xd00003f0,
+	0x04bd000e,
+/* 0x071c: ih_no_fifo */
+	0x0100abe4,
+	0xf00d0bf4,
+	0xe7f110d7,
+	0x21f44001,
+/* 0x072d: ih_no_ctxsw */
+	0x00abe404,
+	0x6c0bf404,
+	0x0708e7f1,
+	0xf440e3f0,
+	0xffb96821,
+	0x0007f102,
+	0x0203f004,
+	0xbd000fd0,
+	0x04e7f104,
+	0x40e3f007,
+	0xb96821f4,
+	0x07f102ff,
+	0x03f00300,
+	0x000fd002,
+	0xfec704bd,
+	0x02ee9450,
+	0x0700f7f1,
+	0xbb40f3f0,
+	0x21f400ef,
+	0x0007f168,
+	0x0203f002,
+	0xbd000fd0,
+	0x03f7f004,
+	0x037e21f5,
+	0x0100b7f1,
+	0xf102bfb9,
+	0xf00144e7,
 	0x21f440e3,
-	0x02ffb968,
-	0x030007f1,
-	0xd00203f0,
-	0x04bd000f,
-	0x9450fec7,
-	0xf7f102ee,
-	0xf3f00700,
-	0x00efbb40,
-	0xf16821f4,
-	0xf0020007,
-	0x0fd00203,
-	0xf004bd00,
-	0x21f503f7,
-	0xb7f1037e,
-	0xbfb90100,
-	0x44e7f102,
-	0x40e3f001,
-/* 0x079b: ih_no_fwmthd */
-	0xf19d21f4,
-	0xbd0504b7,
-	0xb4abffb0,
-	0xf10f0bf4,
-	0xf0070007,
-	0x0bd00303,
-/* 0x07b3: ih_no_other */
-	0xf104bd00,
-	0xf0010007,
-	0x0ad00003,
-	0xfc04bd00,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x07d7: ctx_4160s */
+/* 0x079d: ih_no_fwmthd */
+	0x04b7f19d,
+	0xffb0bd05,
+	0x0bf4b4ab,
+	0x0007f10f,
+	0x0303f007,
+	0xbd000bd0,
+/* 0x07b5: ih_no_other */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0x32f400fc,
+/* 0x07db: ctx_4160s */
 	0xf001f800,
 	0xffb901f7,
 	0x60e7f102,
 	0x40e3f041,
-/* 0x07e7: ctx_4160s_wait */
+/* 0x07eb: ctx_4160s_wait */
 	0xf19d21f4,
 	0xf04160e7,
 	0x21f440e3,
 	0x02ffb968,
 	0xf404ffc8,
 	0x00f8f00b,
-/* 0x07fc: ctx_4160c */
+/* 0x0800: ctx_4160c */
 	0xffb9f4bd,
 	0x60e7f102,
 	0x40e3f041,
 	0xf89d21f4,
-/* 0x080d: ctx_4170s */
+/* 0x0811: ctx_4170s */
 	0x10f5f000,
 	0xf102ffb9,
 	0xf04170e7,
 	0x21f440e3,
-/* 0x081f: ctx_4170w */
+/* 0x0823: ctx_4170w */
 	0xf100f89d,
 	0xf04170e7,
 	0x21f440e3,
 	0x02ffb968,
 	0xf410f4f0,
 	0x00f8f01b,
-/* 0x0834: ctx_redswitch */
+/* 0x0838: ctx_redswitch */
 	0x0200e7f1,
 	0xf040e5f0,
 	0xe5f020e5,
@@ -790,7 +791,7 @@
 	0x0103f085,
 	0xbd000ed0,
 	0x08f7f004,
-/* 0x0850: ctx_redswitch_delay */
+/* 0x0854: ctx_redswitch_delay */
 	0xf401f2b6,
 	0xe5f1fd1b,
 	0xe5f10400,
@@ -798,7 +799,7 @@
 	0x03f08500,
 	0x000ed001,
 	0x00f804bd,
-/* 0x086c: ctx_86c */
+/* 0x0870: ctx_86c */
 	0x1b0007f1,
 	0xd00203f0,
 	0x04bd000f,
@@ -809,16 +810,16 @@
 	0xa86ce7f1,
 	0xf441e3f0,
 	0x00f89d21,
-/* 0x0894: ctx_mem */
+/* 0x0898: ctx_mem */
 	0x840007f1,
 	0xd00203f0,
 	0x04bd000f,
-/* 0x08a0: ctx_mem_wait */
+/* 0x08a4: ctx_mem_wait */
 	0x8400f7f1,
 	0xcf02f3f0,
 	0xfffd00ff,
 	0xf31bf405,
-/* 0x08b2: ctx_load */
+/* 0x08b6: ctx_load */
 	0x94bd00f8,
 	0xf10599f0,
 	0xf00f0007,
@@ -836,7 +837,7 @@
 	0x02d00203,
 	0xf004bd00,
 	0x21f507f7,
-	0x07f10894,
+	0x07f10898,
 	0x03f0c000,
 	0x0002d002,
 	0x0bfe04bd,
@@ -891,31 +892,31 @@
 	0x03f01700,
 	0x0009d002,
 	0x00f804bd,
-/* 0x09d0: ctx_chan */
-	0x07d721f5,
-	0x08b221f5,
+/* 0x09d4: ctx_chan */
+	0x07db21f5,
+	0x08b621f5,
 	0xf40ca7f0,
 	0xf7f0d021,
-	0x9421f505,
-	0xfc21f508,
-/* 0x09eb: ctx_mmio_exec */
-	0x9800f807,
+	0x9821f505,
+	0x0021f508,
+/* 0x09ef: ctx_mmio_exec */
+	0x9800f808,
 	0x07f14103,
 	0x03f08100,
 	0x0003d002,
 	0x34bd04bd,
-/* 0x09fc: ctx_mmio_loop */
+/* 0x0a00: ctx_mmio_loop */
 	0xf4ff34c4,
 	0x57f10f1b,
 	0x53f00200,
 	0x0535fa06,
-/* 0x0a0e: ctx_mmio_pull */
+/* 0x0a12: ctx_mmio_pull */
 	0x4e9803f8,
 	0x814f9880,
 	0xb69d21f4,
 	0x12b60830,
 	0xdf1bf401,
-/* 0x0a20: ctx_mmio_done */
+/* 0x0a24: ctx_mmio_done */
 	0xf1160398,
 	0xf0810007,
 	0x03d00203,
@@ -924,30 +925,30 @@
 	0x13f00100,
 	0x0601fa06,
 	0x00f803f8,
-/* 0x0a40: ctx_xfer */
+/* 0x0a44: ctx_xfer */
 	0xf104e7f0,
 	0xf0020007,
 	0x0ed00303,
-/* 0x0a4f: ctx_xfer_idle */
+/* 0x0a53: ctx_xfer_idle */
 	0xf104bd00,
 	0xf00000e7,
 	0xeecf03e3,
 	0x00e4f100,
 	0xf21bf420,
 	0xf40611f4,
-/* 0x0a66: ctx_xfer_pre */
+/* 0x0a6a: ctx_xfer_pre */
 	0xf7f01102,
-	0x6c21f510,
-	0xd721f508,
+	0x7021f510,
+	0xdb21f508,
 	0x1c11f407,
-/* 0x0a74: ctx_xfer_pre_load */
+/* 0x0a78: ctx_xfer_pre_load */
 	0xf502f7f0,
-	0xf5080d21,
-	0xf5081f21,
-	0xbd083421,
-	0x0d21f5f4,
-	0xb221f508,
-/* 0x0a8d: ctx_xfer_exec */
+	0xf5081121,
+	0xf5082321,
+	0xbd083821,
+	0x1121f5f4,
+	0xb621f508,
+/* 0x0a91: ctx_xfer_exec */
 	0x16019808,
 	0x07f124bd,
 	0x03f00500,
@@ -982,24 +983,23 @@
 	0x1301f402,
 	0xf40ca7f0,
 	0xf7f0d021,
-	0x9421f505,
+	0x9821f505,
 	0x3202f408,
-/* 0x0b1c: ctx_xfer_post */
+/* 0x0b20: ctx_xfer_post */
 	0xf502f7f0,
-	0xbd080d21,
-	0x6c21f5f4,
+	0xbd081121,
+	0x7021f5f4,
 	0x7f21f508,
-	0x1f21f502,
+	0x2321f502,
 	0xf5f4bd08,
-	0xf4080d21,
+	0xf4081121,
 	0x01981011,
 	0x0511fd40,
 	0xf5070bf4,
-/* 0x0b47: ctx_xfer_no_post_mmio */
-	0xf509eb21,
-/* 0x0b4b: ctx_xfer_done */
-	0xf807fc21,
-	0x00000000,
+/* 0x0b4b: ctx_xfer_no_post_mmio */
+	0xf509ef21,
+/* 0x0b4f: ctx_xfer_done */
+	0xf8080021,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index 95ac151..125824b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -528,10 +528,10 @@
 	0x0001d001,
 	0x17f104bd,
 	0xf7f00100,
-	0xd721f502,
-	0xe921f507,
+	0xdb21f502,
+	0xed21f507,
 	0x10f7f007,
-	0x083621f5,
+	0x083a21f5,
 	0x98000e98,
 	0x21f5010f,
 	0x14950150,
@@ -574,9 +574,9 @@
 	0xb6800040,
 	0x1bf40132,
 	0x00f7f0be,
-	0x083621f5,
+	0x083a21f5,
 	0xf500f7f0,
-	0xf107d721,
+	0xf107db21,
 	0xf0010007,
 	0x01d00203,
 	0xbd04bd00,
@@ -610,7 +610,7 @@
 	0x09d00203,
 	0xf404bd00,
 	0x31f40132,
-	0x0221f502,
+	0x0621f502,
 	0xf094bd0a,
 	0x07f10799,
 	0x03f01700,
@@ -621,7 +621,7 @@
 	0x0203f00f,
 	0xbd0009d0,
 	0x0131f404,
-	0x0a0221f5,
+	0x0a0621f5,
 	0x99f094bd,
 	0x0007f106,
 	0x0203f017,
@@ -631,7 +631,7 @@
 	0x12b920f9,
 	0x0132f402,
 	0xf50232f4,
-	0xfc0a0221,
+	0xfc0a0621,
 	0x0007f120,
 	0x0203f0c0,
 	0xbd0002d0,
@@ -640,7 +640,7 @@
 	0xf41f23c8,
 	0x31f40d0b,
 	0x0232f401,
-	0x0a0221f5,
+	0x0a0621f5,
 /* 0x063c: chsw_done */
 	0xf10127f0,
 	0xf0c30007,
@@ -654,7 +654,7 @@
 /* 0x0660: main_not_ctx_switch */
 	0xf401e4b0,
 	0xf2b90d1b,
-	0x9a21f502,
+	0x9e21f502,
 	0x460ef409,
 /* 0x0670: main_not_ctx_chan */
 	0xf402e4b0,
@@ -664,7 +664,7 @@
 	0x09d00203,
 	0xf404bd00,
 	0x32f40132,
-	0x0221f502,
+	0x0621f502,
 	0xf094bd0a,
 	0x07f10799,
 	0x03f01700,
@@ -682,90 +682,91 @@
 	0x04bd0002,
 	0xfea00ef5,
 /* 0x06c8: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x00a7f104,
-	0x00a3f002,
-	0xc400aacf,
-	0x0bf404ab,
-	0x10d7f030,
-	0x1a00e7f1,
-	0xcf00e3f0,
-	0xf7f100ee,
-	0xf3f01900,
-	0x00ffcf00,
-	0xb70421f4,
-	0xf00400b0,
-	0x07f101e7,
-	0x03f01d00,
-	0x000ed000,
-/* 0x071a: ih_no_fifo */
-	0xabe404bd,
-	0x0bf40100,
-	0x10d7f00d,
-	0x4001e7f1,
-/* 0x072b: ih_no_ctxsw */
-	0xe40421f4,
-	0xf40400ab,
-	0xe7f16c0b,
-	0xe3f00708,
-	0x6821f440,
-	0xf102ffb9,
-	0xf0040007,
-	0x0fd00203,
-	0xf104bd00,
-	0xf00704e7,
+	0x80f900f9,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0xf104bdf0,
+	0xf00200a7,
+	0xaacf00a3,
+	0x04abc400,
+	0xf0300bf4,
+	0xe7f110d7,
+	0xe3f01a00,
+	0x00eecf00,
+	0x1900f7f1,
+	0xcf00f3f0,
+	0x21f400ff,
+	0x00b0b704,
+	0x01e7f004,
+	0x1d0007f1,
+	0xd00003f0,
+	0x04bd000e,
+/* 0x071c: ih_no_fifo */
+	0x0100abe4,
+	0xf00d0bf4,
+	0xe7f110d7,
+	0x21f44001,
+/* 0x072d: ih_no_ctxsw */
+	0x00abe404,
+	0x6c0bf404,
+	0x0708e7f1,
+	0xf440e3f0,
+	0xffb96821,
+	0x0007f102,
+	0x0203f004,
+	0xbd000fd0,
+	0x04e7f104,
+	0x40e3f007,
+	0xb96821f4,
+	0x07f102ff,
+	0x03f00300,
+	0x000fd002,
+	0xfec704bd,
+	0x02ee9450,
+	0x0700f7f1,
+	0xbb40f3f0,
+	0x21f400ef,
+	0x0007f168,
+	0x0203f002,
+	0xbd000fd0,
+	0x03f7f004,
+	0x037e21f5,
+	0x0100b7f1,
+	0xf102bfb9,
+	0xf00144e7,
 	0x21f440e3,
-	0x02ffb968,
-	0x030007f1,
-	0xd00203f0,
-	0x04bd000f,
-	0x9450fec7,
-	0xf7f102ee,
-	0xf3f00700,
-	0x00efbb40,
-	0xf16821f4,
-	0xf0020007,
-	0x0fd00203,
-	0xf004bd00,
-	0x21f503f7,
-	0xb7f1037e,
-	0xbfb90100,
-	0x44e7f102,
-	0x40e3f001,
-/* 0x079b: ih_no_fwmthd */
-	0xf19d21f4,
-	0xbd0504b7,
-	0xb4abffb0,
-	0xf10f0bf4,
-	0xf0070007,
-	0x0bd00303,
-/* 0x07b3: ih_no_other */
-	0xf104bd00,
-	0xf0010007,
-	0x0ad00003,
-	0xfc04bd00,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x07d7: ctx_4170s */
+/* 0x079d: ih_no_fwmthd */
+	0x04b7f19d,
+	0xffb0bd05,
+	0x0bf4b4ab,
+	0x0007f10f,
+	0x0303f007,
+	0xbd000bd0,
+/* 0x07b5: ih_no_other */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0x32f400fc,
+/* 0x07db: ctx_4170s */
 	0xf001f800,
 	0xffb910f5,
 	0x70e7f102,
 	0x40e3f041,
 	0xf89d21f4,
-/* 0x07e9: ctx_4170w */
+/* 0x07ed: ctx_4170w */
 	0x70e7f100,
 	0x40e3f041,
 	0xb96821f4,
 	0xf4f002ff,
 	0xf01bf410,
-/* 0x07fe: ctx_redswitch */
+/* 0x0802: ctx_redswitch */
 	0xe7f100f8,
 	0xe5f00200,
 	0x20e5f040,
@@ -773,7 +774,7 @@
 	0xf0850007,
 	0x0ed00103,
 	0xf004bd00,
-/* 0x081a: ctx_redswitch_delay */
+/* 0x081e: ctx_redswitch_delay */
 	0xf2b608f7,
 	0xfd1bf401,
 	0x0400e5f1,
@@ -781,7 +782,7 @@
 	0x850007f1,
 	0xd00103f0,
 	0x04bd000e,
-/* 0x0836: ctx_86c */
+/* 0x083a: ctx_86c */
 	0x07f100f8,
 	0x03f01b00,
 	0x000fd002,
@@ -792,17 +793,17 @@
 	0xe7f102ff,
 	0xe3f0a86c,
 	0x9d21f441,
-/* 0x085e: ctx_mem */
+/* 0x0862: ctx_mem */
 	0x07f100f8,
 	0x03f08400,
 	0x000fd002,
-/* 0x086a: ctx_mem_wait */
+/* 0x086e: ctx_mem_wait */
 	0xf7f104bd,
 	0xf3f08400,
 	0x00ffcf02,
 	0xf405fffd,
 	0x00f8f31b,
-/* 0x087c: ctx_load */
+/* 0x0880: ctx_load */
 	0x99f094bd,
 	0x0007f105,
 	0x0203f00f,
@@ -819,7 +820,7 @@
 	0x0203f083,
 	0xbd0002d0,
 	0x07f7f004,
-	0x085e21f5,
+	0x086221f5,
 	0xc00007f1,
 	0xd00203f0,
 	0x04bd0002,
@@ -874,29 +875,29 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x099a: ctx_chan */
+/* 0x099e: ctx_chan */
 	0x21f500f8,
-	0xa7f0087c,
+	0xa7f00880,
 	0xd021f40c,
 	0xf505f7f0,
-	0xf8085e21,
-/* 0x09ad: ctx_mmio_exec */
+	0xf8086221,
+/* 0x09b1: ctx_mmio_exec */
 	0x41039800,
 	0x810007f1,
 	0xd00203f0,
 	0x04bd0003,
-/* 0x09be: ctx_mmio_loop */
+/* 0x09c2: ctx_mmio_loop */
 	0x34c434bd,
 	0x0f1bf4ff,
 	0x020057f1,
 	0xfa0653f0,
 	0x03f80535,
-/* 0x09d0: ctx_mmio_pull */
+/* 0x09d4: ctx_mmio_pull */
 	0x98804e98,
 	0x21f4814f,
 	0x0830b69d,
 	0xf40112b6,
-/* 0x09e2: ctx_mmio_done */
+/* 0x09e6: ctx_mmio_done */
 	0x0398df1b,
 	0x0007f116,
 	0x0203f081,
@@ -905,30 +906,30 @@
 	0x010017f1,
 	0xfa0613f0,
 	0x03f80601,
-/* 0x0a02: ctx_xfer */
+/* 0x0a06: ctx_xfer */
 	0xe7f000f8,
 	0x0007f104,
 	0x0303f002,
 	0xbd000ed0,
-/* 0x0a11: ctx_xfer_idle */
+/* 0x0a15: ctx_xfer_idle */
 	0x00e7f104,
 	0x03e3f000,
 	0xf100eecf,
 	0xf42000e4,
 	0x11f4f21b,
 	0x0d02f406,
-/* 0x0a28: ctx_xfer_pre */
+/* 0x0a2c: ctx_xfer_pre */
 	0xf510f7f0,
-	0xf4083621,
-/* 0x0a32: ctx_xfer_pre_load */
+	0xf4083a21,
+/* 0x0a36: ctx_xfer_pre_load */
 	0xf7f01c11,
-	0xd721f502,
-	0xe921f507,
-	0xfe21f507,
-	0xf5f4bd07,
-	0xf507d721,
-/* 0x0a4b: ctx_xfer_exec */
-	0x98087c21,
+	0xdb21f502,
+	0xed21f507,
+	0x0221f507,
+	0xf5f4bd08,
+	0xf507db21,
+/* 0x0a4f: ctx_xfer_exec */
+	0x98088021,
 	0x24bd1601,
 	0x050007f1,
 	0xd00103f0,
@@ -963,21 +964,21 @@
 	0xa7f01301,
 	0xd021f40c,
 	0xf505f7f0,
-	0xf4085e21,
-/* 0x0ada: ctx_xfer_post */
+	0xf4086221,
+/* 0x0ade: ctx_xfer_post */
 	0xf7f02e02,
-	0xd721f502,
+	0xdb21f502,
 	0xf5f4bd07,
-	0xf5083621,
+	0xf5083a21,
 	0xf5027f21,
-	0xbd07e921,
-	0xd721f5f4,
+	0xbd07ed21,
+	0xdb21f5f4,
 	0x1011f407,
 	0xfd400198,
 	0x0bf40511,
-	0xad21f507,
-/* 0x0b05: ctx_xfer_no_post_mmio */
-/* 0x0b05: ctx_xfer_done */
+	0xb121f507,
+/* 0x0b09: ctx_xfer_no_post_mmio */
+/* 0x0b09: ctx_xfer_done */
 	0x0000f809,
 	0x00000000,
 	0x00000000,
@@ -1040,5 +1041,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index 8998687..0a1b8c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -528,10 +528,10 @@
 	0x0001d001,
 	0x17f104bd,
 	0xf7f00100,
-	0xd721f502,
-	0xe921f507,
+	0xdb21f502,
+	0xed21f507,
 	0x10f7f007,
-	0x083621f5,
+	0x083a21f5,
 	0x98000e98,
 	0x21f5010f,
 	0x14950150,
@@ -574,9 +574,9 @@
 	0xb6800040,
 	0x1bf40132,
 	0x00f7f0be,
-	0x083621f5,
+	0x083a21f5,
 	0xf500f7f0,
-	0xf107d721,
+	0xf107db21,
 	0xf0010007,
 	0x01d00203,
 	0xbd04bd00,
@@ -610,7 +610,7 @@
 	0x09d00203,
 	0xf404bd00,
 	0x31f40132,
-	0x0221f502,
+	0x0621f502,
 	0xf094bd0a,
 	0x07f10799,
 	0x03f01700,
@@ -621,7 +621,7 @@
 	0x0203f037,
 	0xbd0009d0,
 	0x0131f404,
-	0x0a0221f5,
+	0x0a0621f5,
 	0x99f094bd,
 	0x0007f106,
 	0x0203f017,
@@ -631,7 +631,7 @@
 	0x12b920f9,
 	0x0132f402,
 	0xf50232f4,
-	0xfc0a0221,
+	0xfc0a0621,
 	0x0007f120,
 	0x0203f0c0,
 	0xbd0002d0,
@@ -640,7 +640,7 @@
 	0xf41f23c8,
 	0x31f40d0b,
 	0x0232f401,
-	0x0a0221f5,
+	0x0a0621f5,
 /* 0x063c: chsw_done */
 	0xf10127f0,
 	0xf0c30007,
@@ -654,7 +654,7 @@
 /* 0x0660: main_not_ctx_switch */
 	0xf401e4b0,
 	0xf2b90d1b,
-	0x9a21f502,
+	0x9e21f502,
 	0x460ef409,
 /* 0x0670: main_not_ctx_chan */
 	0xf402e4b0,
@@ -664,7 +664,7 @@
 	0x09d00203,
 	0xf404bd00,
 	0x32f40132,
-	0x0221f502,
+	0x0621f502,
 	0xf094bd0a,
 	0x07f10799,
 	0x03f01700,
@@ -682,90 +682,91 @@
 	0x04bd0002,
 	0xfea00ef5,
 /* 0x06c8: ih */
-	0x88fe80f9,
-	0xf980f901,
-	0xf9a0f990,
-	0xf9d0f9b0,
-	0xbdf0f9e0,
-	0x00a7f104,
-	0x00a3f002,
-	0xc400aacf,
-	0x0bf404ab,
-	0x10d7f030,
-	0x1a00e7f1,
-	0xcf00e3f0,
-	0xf7f100ee,
-	0xf3f01900,
-	0x00ffcf00,
-	0xb70421f4,
-	0xf00400b0,
-	0x07f101e7,
-	0x03f01d00,
-	0x000ed000,
-/* 0x071a: ih_no_fifo */
-	0xabe404bd,
-	0x0bf40100,
-	0x10d7f00d,
-	0x4001e7f1,
-/* 0x072b: ih_no_ctxsw */
-	0xe40421f4,
-	0xf40400ab,
-	0xe7f16c0b,
-	0xe3f00708,
-	0x6821f440,
-	0xf102ffb9,
-	0xf0040007,
-	0x0fd00203,
-	0xf104bd00,
-	0xf00704e7,
+	0x80f900f9,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0xf104bdf0,
+	0xf00200a7,
+	0xaacf00a3,
+	0x04abc400,
+	0xf0300bf4,
+	0xe7f110d7,
+	0xe3f01a00,
+	0x00eecf00,
+	0x1900f7f1,
+	0xcf00f3f0,
+	0x21f400ff,
+	0x00b0b704,
+	0x01e7f004,
+	0x1d0007f1,
+	0xd00003f0,
+	0x04bd000e,
+/* 0x071c: ih_no_fifo */
+	0x0100abe4,
+	0xf00d0bf4,
+	0xe7f110d7,
+	0x21f44001,
+/* 0x072d: ih_no_ctxsw */
+	0x00abe404,
+	0x6c0bf404,
+	0x0708e7f1,
+	0xf440e3f0,
+	0xffb96821,
+	0x0007f102,
+	0x0203f004,
+	0xbd000fd0,
+	0x04e7f104,
+	0x40e3f007,
+	0xb96821f4,
+	0x07f102ff,
+	0x03f00300,
+	0x000fd002,
+	0xfec704bd,
+	0x02ee9450,
+	0x0700f7f1,
+	0xbb40f3f0,
+	0x21f400ef,
+	0x0007f168,
+	0x0203f002,
+	0xbd000fd0,
+	0x03f7f004,
+	0x037e21f5,
+	0x0100b7f1,
+	0xf102bfb9,
+	0xf00144e7,
 	0x21f440e3,
-	0x02ffb968,
-	0x030007f1,
-	0xd00203f0,
-	0x04bd000f,
-	0x9450fec7,
-	0xf7f102ee,
-	0xf3f00700,
-	0x00efbb40,
-	0xf16821f4,
-	0xf0020007,
-	0x0fd00203,
-	0xf004bd00,
-	0x21f503f7,
-	0xb7f1037e,
-	0xbfb90100,
-	0x44e7f102,
-	0x40e3f001,
-/* 0x079b: ih_no_fwmthd */
-	0xf19d21f4,
-	0xbd0504b7,
-	0xb4abffb0,
-	0xf10f0bf4,
-	0xf0070007,
-	0x0bd00303,
-/* 0x07b3: ih_no_other */
-	0xf104bd00,
-	0xf0010007,
-	0x0ad00003,
-	0xfc04bd00,
-	0xfce0fcf0,
-	0xfcb0fcd0,
-	0xfc90fca0,
-	0x0088fe80,
-	0x32f480fc,
-/* 0x07d7: ctx_4170s */
+/* 0x079d: ih_no_fwmthd */
+	0x04b7f19d,
+	0xffb0bd05,
+	0x0bf4b4ab,
+	0x0007f10f,
+	0x0303f007,
+	0xbd000bd0,
+/* 0x07b5: ih_no_other */
+	0x0007f104,
+	0x0003f001,
+	0xbd000ad0,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0x32f400fc,
+/* 0x07db: ctx_4170s */
 	0xf001f800,
 	0xffb910f5,
 	0x70e7f102,
 	0x40e3f041,
 	0xf89d21f4,
-/* 0x07e9: ctx_4170w */
+/* 0x07ed: ctx_4170w */
 	0x70e7f100,
 	0x40e3f041,
 	0xb96821f4,
 	0xf4f002ff,
 	0xf01bf410,
-/* 0x07fe: ctx_redswitch */
+/* 0x0802: ctx_redswitch */
 	0xe7f100f8,
 	0xe5f00200,
 	0x20e5f040,
@@ -773,7 +774,7 @@
 	0xf0850007,
 	0x0ed00103,
 	0xf004bd00,
-/* 0x081a: ctx_redswitch_delay */
+/* 0x081e: ctx_redswitch_delay */
 	0xf2b608f7,
 	0xfd1bf401,
 	0x0400e5f1,
@@ -781,7 +782,7 @@
 	0x850007f1,
 	0xd00103f0,
 	0x04bd000e,
-/* 0x0836: ctx_86c */
+/* 0x083a: ctx_86c */
 	0x07f100f8,
 	0x03f02300,
 	0x000fd002,
@@ -792,17 +793,17 @@
 	0xe7f102ff,
 	0xe3f0a88c,
 	0x9d21f441,
-/* 0x085e: ctx_mem */
+/* 0x0862: ctx_mem */
 	0x07f100f8,
 	0x03f08400,
 	0x000fd002,
-/* 0x086a: ctx_mem_wait */
+/* 0x086e: ctx_mem_wait */
 	0xf7f104bd,
 	0xf3f08400,
 	0x00ffcf02,
 	0xf405fffd,
 	0x00f8f31b,
-/* 0x087c: ctx_load */
+/* 0x0880: ctx_load */
 	0x99f094bd,
 	0x0007f105,
 	0x0203f037,
@@ -819,7 +820,7 @@
 	0x0203f083,
 	0xbd0002d0,
 	0x07f7f004,
-	0x085e21f5,
+	0x086221f5,
 	0xc00007f1,
 	0xd00203f0,
 	0x04bd0002,
@@ -874,29 +875,29 @@
 	0x170007f1,
 	0xd00203f0,
 	0x04bd0009,
-/* 0x099a: ctx_chan */
+/* 0x099e: ctx_chan */
 	0x21f500f8,
-	0xa7f0087c,
+	0xa7f00880,
 	0xd021f40c,
 	0xf505f7f0,
-	0xf8085e21,
-/* 0x09ad: ctx_mmio_exec */
+	0xf8086221,
+/* 0x09b1: ctx_mmio_exec */
 	0x41039800,
 	0x810007f1,
 	0xd00203f0,
 	0x04bd0003,
-/* 0x09be: ctx_mmio_loop */
+/* 0x09c2: ctx_mmio_loop */
 	0x34c434bd,
 	0x0f1bf4ff,
 	0x020057f1,
 	0xfa0653f0,
 	0x03f80535,
-/* 0x09d0: ctx_mmio_pull */
+/* 0x09d4: ctx_mmio_pull */
 	0x98804e98,
 	0x21f4814f,
 	0x0830b69d,
 	0xf40112b6,
-/* 0x09e2: ctx_mmio_done */
+/* 0x09e6: ctx_mmio_done */
 	0x0398df1b,
 	0x0007f116,
 	0x0203f081,
@@ -905,30 +906,30 @@
 	0x010017f1,
 	0xfa0613f0,
 	0x03f80601,
-/* 0x0a02: ctx_xfer */
+/* 0x0a06: ctx_xfer */
 	0xe7f000f8,
 	0x0007f104,
 	0x0303f002,
 	0xbd000ed0,
-/* 0x0a11: ctx_xfer_idle */
+/* 0x0a15: ctx_xfer_idle */
 	0x00e7f104,
 	0x03e3f000,
 	0xf100eecf,
 	0xf42000e4,
 	0x11f4f21b,
 	0x0d02f406,
-/* 0x0a28: ctx_xfer_pre */
+/* 0x0a2c: ctx_xfer_pre */
 	0xf510f7f0,
-	0xf4083621,
-/* 0x0a32: ctx_xfer_pre_load */
+	0xf4083a21,
+/* 0x0a36: ctx_xfer_pre_load */
 	0xf7f01c11,
-	0xd721f502,
-	0xe921f507,
-	0xfe21f507,
-	0xf5f4bd07,
-	0xf507d721,
-/* 0x0a4b: ctx_xfer_exec */
-	0x98087c21,
+	0xdb21f502,
+	0xed21f507,
+	0x0221f507,
+	0xf5f4bd08,
+	0xf507db21,
+/* 0x0a4f: ctx_xfer_exec */
+	0x98088021,
 	0x24bd1601,
 	0x050007f1,
 	0xd00103f0,
@@ -963,21 +964,21 @@
 	0xa7f01301,
 	0xd021f40c,
 	0xf505f7f0,
-	0xf4085e21,
-/* 0x0ada: ctx_xfer_post */
+	0xf4086221,
+/* 0x0ade: ctx_xfer_post */
 	0xf7f02e02,
-	0xd721f502,
+	0xdb21f502,
 	0xf5f4bd07,
-	0xf5083621,
+	0xf5083a21,
 	0xf5027f21,
-	0xbd07e921,
-	0xd721f5f4,
+	0xbd07ed21,
+	0xdb21f5f4,
 	0x1011f407,
 	0xfd400198,
 	0x0bf40511,
-	0xad21f507,
-/* 0x0b05: ctx_xfer_no_post_mmio */
-/* 0x0b05: ctx_xfer_done */
+	0xb121f507,
+/* 0x0b09: ctx_xfer_no_post_mmio */
+/* 0x0b09: ctx_xfer_done */
 	0x0000f809,
 	0x00000000,
 	0x00000000,
@@ -1040,5 +1041,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index 0e98fa4..16869d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -478,10 +478,10 @@
 	0x01040080,
 	0xbd0001f6,
 	0x01004104,
-	0xa87e020f,
-	0xb77e0006,
+	0xac7e020f,
+	0xbb7e0006,
 	0x100f0006,
-	0x0006f97e,
+	0x0006fd7e,
 	0x98000e98,
 	0x207e010f,
 	0x14950001,
@@ -523,8 +523,8 @@
 	0x800040b7,
 	0xf40132b6,
 	0x000fb41b,
-	0x0006f97e,
-	0xa87e000f,
+	0x0006fd7e,
+	0xac7e000f,
 	0x00800006,
 	0x01f60201,
 	0xbd04bd00,
@@ -554,7 +554,7 @@
 	0x0009f602,
 	0x32f404bd,
 	0x0231f401,
-	0x00087c7e,
+	0x0008807e,
 	0x99f094bd,
 	0x17008007,
 	0x0009f602,
@@ -563,7 +563,7 @@
 	0x37008006,
 	0x0009f602,
 	0x31f404bd,
-	0x087c7e01,
+	0x08807e01,
 	0xf094bd00,
 	0x00800699,
 	0x09f60217,
@@ -572,7 +572,7 @@
 	0x20f92f0e,
 	0x32f412b2,
 	0x0232f401,
-	0x00087c7e,
+	0x0008807e,
 	0x008020fc,
 	0x02f602c0,
 	0xf404bd00,
@@ -580,7 +580,7 @@
 	0x23c8130e,
 	0x0d0bf41f,
 	0xf40131f4,
-	0x7c7e0232,
+	0x807e0232,
 /* 0x054e: chsw_done */
 	0x01020008,
 	0x02c30080,
@@ -593,7 +593,7 @@
 	0xb0ff2a0e,
 	0x1bf401e4,
 	0x7ef2b20c,
-	0xf400081c,
+	0xf4000820,
 /* 0x057a: main_not_ctx_chan */
 	0xe4b0400e,
 	0x2c1bf402,
@@ -602,7 +602,7 @@
 	0x0009f602,
 	0x32f404bd,
 	0x0232f401,
-	0x00087c7e,
+	0x0008807e,
 	0x99f094bd,
 	0x17008007,
 	0x0009f602,
@@ -618,91 +618,92 @@
 	0xbd0002f6,
 	0xcc0ef504,
 /* 0x05c9: ih */
-	0xfe80f9fe,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0x004a04bd,
-	0x00aacf02,
-	0xf404abc4,
-	0x100d230b,
-	0xcf1a004e,
-	0x004f00ee,
-	0x00ffcf19,
+	0xf900f9fe,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0xcf02004a,
+	0xabc400aa,
+	0x230bf404,
+	0x004e100d,
+	0x00eecf1a,
+	0xcf19004f,
+	0x047e00ff,
+	0xb0b70000,
+	0x010e0400,
+	0xf61d0040,
+	0x04bd000e,
+/* 0x060c: ih_no_fifo */
+	0x0100abe4,
+	0x0d0c0bf4,
+	0x40014e10,
 	0x0000047e,
-	0x0400b0b7,
-	0x0040010e,
-	0x000ef61d,
-/* 0x060a: ih_no_fifo */
-	0xabe404bd,
-	0x0bf40100,
-	0x4e100d0c,
-	0x047e4001,
-/* 0x061a: ih_no_ctxsw */
-	0xabe40000,
-	0x0bf40400,
-	0x07088e56,
-	0x00657e40,
-	0x80ffb200,
-	0xf6020400,
-	0x04bd000f,
-	0x4007048e,
-	0x0000657e,
-	0x0080ffb2,
-	0x0ff60203,
-	0xc704bd00,
-	0xee9450fe,
-	0x07008f02,
-	0x00efbb40,
-	0x0000657e,
-	0x02020080,
+/* 0x061c: ih_no_ctxsw */
+	0x0400abe4,
+	0x8e560bf4,
+	0x7e400708,
+	0xb2000065,
+	0x040080ff,
+	0x000ff602,
+	0x048e04bd,
+	0x657e4007,
+	0xffb20000,
+	0x02030080,
 	0xbd000ff6,
-	0x7e030f04,
-	0x4b0002f8,
-	0xbfb20100,
-	0x4001448e,
-	0x00008f7e,
-/* 0x0674: ih_no_fwmthd */
-	0xbd05044b,
-	0xb4abffb0,
-	0x800c0bf4,
-	0xf6030700,
-	0x04bd000b,
-/* 0x0688: ih_no_other */
-	0xf6010040,
-	0x04bd000a,
-	0xe0fcf0fc,
-	0xb0fcd0fc,
-	0x90fca0fc,
-	0x88fe80fc,
-	0xf480fc00,
+	0x50fec704,
+	0x8f02ee94,
+	0xbb400700,
+	0x657e00ef,
+	0x00800000,
+	0x0ff60202,
+	0x0f04bd00,
+	0x02f87e03,
+	0x01004b00,
+	0x448ebfb2,
+	0x8f7e4001,
+/* 0x0676: ih_no_fwmthd */
+	0x044b0000,
+	0xffb0bd05,
+	0x0bf4b4ab,
+	0x0700800c,
+	0x000bf603,
+/* 0x068a: ih_no_other */
+	0x004004bd,
+	0x000af601,
+	0xf0fc04bd,
+	0xd0fce0fc,
+	0xa0fcb0fc,
+	0x80fc90fc,
+	0xfc0088fe,
+	0xf400fc80,
 	0x01f80032,
-/* 0x06a8: ctx_4170s */
+/* 0x06ac: ctx_4170s */
 	0xb210f5f0,
 	0x41708eff,
 	0x008f7e40,
-/* 0x06b7: ctx_4170w */
+/* 0x06bb: ctx_4170w */
 	0x8e00f800,
 	0x7e404170,
 	0xb2000065,
 	0x10f4f0ff,
 	0xf8f31bf4,
-/* 0x06c9: ctx_redswitch */
+/* 0x06cd: ctx_redswitch */
 	0x02004e00,
 	0xf040e5f0,
 	0xe5f020e5,
 	0x85008010,
 	0x000ef601,
 	0x080f04bd,
-/* 0x06e0: ctx_redswitch_delay */
+/* 0x06e4: ctx_redswitch_delay */
 	0xf401f2b6,
 	0xe5f1fd1b,
 	0xe5f10400,
 	0x00800100,
 	0x0ef60185,
 	0xf804bd00,
-/* 0x06f9: ctx_86c */
+/* 0x06fd: ctx_86c */
 	0x23008000,
 	0x000ff602,
 	0xffb204bd,
@@ -711,15 +712,15 @@
 	0x8c8effb2,
 	0x8f7e41a8,
 	0x00f80000,
-/* 0x0718: ctx_mem */
+/* 0x071c: ctx_mem */
 	0x02840080,
 	0xbd000ff6,
-/* 0x0721: ctx_mem_wait */
+/* 0x0725: ctx_mem_wait */
 	0x84008f04,
 	0x00ffcf02,
 	0xf405fffd,
 	0x00f8f61b,
-/* 0x0730: ctx_load */
+/* 0x0734: ctx_load */
 	0x99f094bd,
 	0x37008005,
 	0x0009f602,
@@ -733,7 +734,7 @@
 	0x02830080,
 	0xbd0002f6,
 	0x7e070f04,
-	0x80000718,
+	0x8000071c,
 	0xf602c000,
 	0x04bd0002,
 	0xf0000bfe,
@@ -779,28 +780,28 @@
 	0x17008005,
 	0x0009f602,
 	0x00f804bd,
-/* 0x081c: ctx_chan */
-	0x0007307e,
+/* 0x0820: ctx_chan */
+	0x0007347e,
 	0xb87e0c0a,
 	0x050f0000,
-	0x0007187e,
-/* 0x082e: ctx_mmio_exec */
+	0x00071c7e,
+/* 0x0832: ctx_mmio_exec */
 	0x039800f8,
 	0x81008041,
 	0x0003f602,
 	0x34bd04bd,
-/* 0x083c: ctx_mmio_loop */
+/* 0x0840: ctx_mmio_loop */
 	0xf4ff34c4,
 	0x00450e1b,
 	0x0653f002,
 	0xf80535fa,
-/* 0x084d: ctx_mmio_pull */
+/* 0x0851: ctx_mmio_pull */
 	0x804e9803,
 	0x7e814f98,
 	0xb600008f,
 	0x12b60830,
 	0xdf1bf401,
-/* 0x0860: ctx_mmio_done */
+/* 0x0864: ctx_mmio_done */
 	0x80160398,
 	0xf6028100,
 	0x04bd0003,
@@ -808,27 +809,27 @@
 	0x13f00100,
 	0x0601fa06,
 	0x00f803f8,
-/* 0x087c: ctx_xfer */
+/* 0x0880: ctx_xfer */
 	0x0080040e,
 	0x0ef60302,
-/* 0x0887: ctx_xfer_idle */
+/* 0x088b: ctx_xfer_idle */
 	0x8e04bd00,
 	0xcf030000,
 	0xe4f100ee,
 	0x1bf42000,
 	0x0611f4f5,
-/* 0x089b: ctx_xfer_pre */
+/* 0x089f: ctx_xfer_pre */
 	0x0f0c02f4,
-	0x06f97e10,
+	0x06fd7e10,
 	0x1b11f400,
-/* 0x08a4: ctx_xfer_pre_load */
-	0xa87e020f,
-	0xb77e0006,
-	0xc97e0006,
+/* 0x08a8: ctx_xfer_pre_load */
+	0xac7e020f,
+	0xbb7e0006,
+	0xcd7e0006,
 	0xf4bd0006,
-	0x0006a87e,
-	0x0007307e,
-/* 0x08bc: ctx_xfer_exec */
+	0x0006ac7e,
+	0x0007347e,
+/* 0x08c0: ctx_xfer_exec */
 	0xbd160198,
 	0x05008024,
 	0x0002f601,
@@ -858,21 +859,21 @@
 	0x01f40002,
 	0x7e0c0a12,
 	0x0f0000b8,
-	0x07187e05,
+	0x071c7e05,
 	0x2d02f400,
-/* 0x0938: ctx_xfer_post */
-	0xa87e020f,
+/* 0x093c: ctx_xfer_post */
+	0xac7e020f,
 	0xf4bd0006,
-	0x0006f97e,
+	0x0006fd7e,
 	0x0002277e,
-	0x0006b77e,
-	0xa87ef4bd,
+	0x0006bb7e,
+	0xac7ef4bd,
 	0x11f40006,
 	0x40019810,
 	0xf40511fd,
-	0x2e7e070b,
-/* 0x0962: ctx_xfer_no_post_mmio */
-/* 0x0962: ctx_xfer_done */
+	0x327e070b,
+/* 0x0966: ctx_xfer_no_post_mmio */
+/* 0x0966: ctx_xfer_done */
 	0x00f80008,
 	0x00000000,
 	0x00000000,
@@ -912,5 +913,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 5f953c5..d6343d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -478,10 +478,10 @@
 	0x01040080,
 	0xbd0001f6,
 	0x01004104,
-	0xa87e020f,
-	0xb77e0006,
+	0xac7e020f,
+	0xbb7e0006,
 	0x100f0006,
-	0x0006f97e,
+	0x0006fd7e,
 	0x98000e98,
 	0x207e010f,
 	0x14950001,
@@ -523,8 +523,8 @@
 	0x800040b7,
 	0xf40132b6,
 	0x000fb41b,
-	0x0006f97e,
-	0xa87e000f,
+	0x0006fd7e,
+	0xac7e000f,
 	0x00800006,
 	0x01f60201,
 	0xbd04bd00,
@@ -554,7 +554,7 @@
 	0x0009f602,
 	0x32f404bd,
 	0x0231f401,
-	0x00087c7e,
+	0x0008807e,
 	0x99f094bd,
 	0x17008007,
 	0x0009f602,
@@ -563,7 +563,7 @@
 	0x37008006,
 	0x0009f602,
 	0x31f404bd,
-	0x087c7e01,
+	0x08807e01,
 	0xf094bd00,
 	0x00800699,
 	0x09f60217,
@@ -572,7 +572,7 @@
 	0x20f92f0e,
 	0x32f412b2,
 	0x0232f401,
-	0x00087c7e,
+	0x0008807e,
 	0x008020fc,
 	0x02f602c0,
 	0xf404bd00,
@@ -580,7 +580,7 @@
 	0x23c8130e,
 	0x0d0bf41f,
 	0xf40131f4,
-	0x7c7e0232,
+	0x807e0232,
 /* 0x054e: chsw_done */
 	0x01020008,
 	0x02c30080,
@@ -593,7 +593,7 @@
 	0xb0ff2a0e,
 	0x1bf401e4,
 	0x7ef2b20c,
-	0xf400081c,
+	0xf4000820,
 /* 0x057a: main_not_ctx_chan */
 	0xe4b0400e,
 	0x2c1bf402,
@@ -602,7 +602,7 @@
 	0x0009f602,
 	0x32f404bd,
 	0x0232f401,
-	0x00087c7e,
+	0x0008807e,
 	0x99f094bd,
 	0x17008007,
 	0x0009f602,
@@ -618,91 +618,92 @@
 	0xbd0002f6,
 	0xcc0ef504,
 /* 0x05c9: ih */
-	0xfe80f9fe,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0x004a04bd,
-	0x00aacf02,
-	0xf404abc4,
-	0x100d230b,
-	0xcf1a004e,
-	0x004f00ee,
-	0x00ffcf19,
+	0xf900f9fe,
+	0x0188fe80,
+	0x90f980f9,
+	0xb0f9a0f9,
+	0xe0f9d0f9,
+	0x04bdf0f9,
+	0xcf02004a,
+	0xabc400aa,
+	0x230bf404,
+	0x004e100d,
+	0x00eecf1a,
+	0xcf19004f,
+	0x047e00ff,
+	0xb0b70000,
+	0x010e0400,
+	0xf61d0040,
+	0x04bd000e,
+/* 0x060c: ih_no_fifo */
+	0x0100abe4,
+	0x0d0c0bf4,
+	0x40014e10,
 	0x0000047e,
-	0x0400b0b7,
-	0x0040010e,
-	0x000ef61d,
-/* 0x060a: ih_no_fifo */
-	0xabe404bd,
-	0x0bf40100,
-	0x4e100d0c,
-	0x047e4001,
-/* 0x061a: ih_no_ctxsw */
-	0xabe40000,
-	0x0bf40400,
-	0x07088e56,
-	0x00657e40,
-	0x80ffb200,
-	0xf6020400,
-	0x04bd000f,
-	0x4007048e,
-	0x0000657e,
-	0x0080ffb2,
-	0x0ff60203,
-	0xc704bd00,
-	0xee9450fe,
-	0x07008f02,
-	0x00efbb40,
-	0x0000657e,
-	0x02020080,
+/* 0x061c: ih_no_ctxsw */
+	0x0400abe4,
+	0x8e560bf4,
+	0x7e400708,
+	0xb2000065,
+	0x040080ff,
+	0x000ff602,
+	0x048e04bd,
+	0x657e4007,
+	0xffb20000,
+	0x02030080,
 	0xbd000ff6,
-	0x7e030f04,
-	0x4b0002f8,
-	0xbfb20100,
-	0x4001448e,
-	0x00008f7e,
-/* 0x0674: ih_no_fwmthd */
-	0xbd05044b,
-	0xb4abffb0,
-	0x800c0bf4,
-	0xf6030700,
-	0x04bd000b,
-/* 0x0688: ih_no_other */
-	0xf6010040,
-	0x04bd000a,
-	0xe0fcf0fc,
-	0xb0fcd0fc,
-	0x90fca0fc,
-	0x88fe80fc,
-	0xf480fc00,
+	0x50fec704,
+	0x8f02ee94,
+	0xbb400700,
+	0x657e00ef,
+	0x00800000,
+	0x0ff60202,
+	0x0f04bd00,
+	0x02f87e03,
+	0x01004b00,
+	0x448ebfb2,
+	0x8f7e4001,
+/* 0x0676: ih_no_fwmthd */
+	0x044b0000,
+	0xffb0bd05,
+	0x0bf4b4ab,
+	0x0700800c,
+	0x000bf603,
+/* 0x068a: ih_no_other */
+	0x004004bd,
+	0x000af601,
+	0xf0fc04bd,
+	0xd0fce0fc,
+	0xa0fcb0fc,
+	0x80fc90fc,
+	0xfc0088fe,
+	0xf400fc80,
 	0x01f80032,
-/* 0x06a8: ctx_4170s */
+/* 0x06ac: ctx_4170s */
 	0xb210f5f0,
 	0x41708eff,
 	0x008f7e40,
-/* 0x06b7: ctx_4170w */
+/* 0x06bb: ctx_4170w */
 	0x8e00f800,
 	0x7e404170,
 	0xb2000065,
 	0x10f4f0ff,
 	0xf8f31bf4,
-/* 0x06c9: ctx_redswitch */
+/* 0x06cd: ctx_redswitch */
 	0x02004e00,
 	0xf040e5f0,
 	0xe5f020e5,
 	0x85008010,
 	0x000ef601,
 	0x080f04bd,
-/* 0x06e0: ctx_redswitch_delay */
+/* 0x06e4: ctx_redswitch_delay */
 	0xf401f2b6,
 	0xe5f1fd1b,
 	0xe5f10400,
 	0x00800100,
 	0x0ef60185,
 	0xf804bd00,
-/* 0x06f9: ctx_86c */
+/* 0x06fd: ctx_86c */
 	0x23008000,
 	0x000ff602,
 	0xffb204bd,
@@ -711,15 +712,15 @@
 	0x8c8effb2,
 	0x8f7e41a8,
 	0x00f80000,
-/* 0x0718: ctx_mem */
+/* 0x071c: ctx_mem */
 	0x02840080,
 	0xbd000ff6,
-/* 0x0721: ctx_mem_wait */
+/* 0x0725: ctx_mem_wait */
 	0x84008f04,
 	0x00ffcf02,
 	0xf405fffd,
 	0x00f8f61b,
-/* 0x0730: ctx_load */
+/* 0x0734: ctx_load */
 	0x99f094bd,
 	0x37008005,
 	0x0009f602,
@@ -733,7 +734,7 @@
 	0x02830080,
 	0xbd0002f6,
 	0x7e070f04,
-	0x80000718,
+	0x8000071c,
 	0xf602c000,
 	0x04bd0002,
 	0xf0000bfe,
@@ -779,28 +780,28 @@
 	0x17008005,
 	0x0009f602,
 	0x00f804bd,
-/* 0x081c: ctx_chan */
-	0x0007307e,
+/* 0x0820: ctx_chan */
+	0x0007347e,
 	0xb87e0c0a,
 	0x050f0000,
-	0x0007187e,
-/* 0x082e: ctx_mmio_exec */
+	0x00071c7e,
+/* 0x0832: ctx_mmio_exec */
 	0x039800f8,
 	0x81008041,
 	0x0003f602,
 	0x34bd04bd,
-/* 0x083c: ctx_mmio_loop */
+/* 0x0840: ctx_mmio_loop */
 	0xf4ff34c4,
 	0x00450e1b,
 	0x0653f002,
 	0xf80535fa,
-/* 0x084d: ctx_mmio_pull */
+/* 0x0851: ctx_mmio_pull */
 	0x804e9803,
 	0x7e814f98,
 	0xb600008f,
 	0x12b60830,
 	0xdf1bf401,
-/* 0x0860: ctx_mmio_done */
+/* 0x0864: ctx_mmio_done */
 	0x80160398,
 	0xf6028100,
 	0x04bd0003,
@@ -808,27 +809,27 @@
 	0x13f00100,
 	0x0601fa06,
 	0x00f803f8,
-/* 0x087c: ctx_xfer */
+/* 0x0880: ctx_xfer */
 	0x0080040e,
 	0x0ef60302,
-/* 0x0887: ctx_xfer_idle */
+/* 0x088b: ctx_xfer_idle */
 	0x8e04bd00,
 	0xcf030000,
 	0xe4f100ee,
 	0x1bf42000,
 	0x0611f4f5,
-/* 0x089b: ctx_xfer_pre */
+/* 0x089f: ctx_xfer_pre */
 	0x0f0c02f4,
-	0x06f97e10,
+	0x06fd7e10,
 	0x1b11f400,
-/* 0x08a4: ctx_xfer_pre_load */
-	0xa87e020f,
-	0xb77e0006,
-	0xc97e0006,
+/* 0x08a8: ctx_xfer_pre_load */
+	0xac7e020f,
+	0xbb7e0006,
+	0xcd7e0006,
 	0xf4bd0006,
-	0x0006a87e,
-	0x0007307e,
-/* 0x08bc: ctx_xfer_exec */
+	0x0006ac7e,
+	0x0007347e,
+/* 0x08c0: ctx_xfer_exec */
 	0xbd160198,
 	0x05008024,
 	0x0002f601,
@@ -858,21 +859,21 @@
 	0x01f40002,
 	0x7e0c0a12,
 	0x0f0000b8,
-	0x07187e05,
+	0x071c7e05,
 	0x2d02f400,
-/* 0x0938: ctx_xfer_post */
-	0xa87e020f,
+/* 0x093c: ctx_xfer_post */
+	0xac7e020f,
 	0xf4bd0006,
-	0x0006f97e,
+	0x0006fd7e,
 	0x0002277e,
-	0x0006b77e,
-	0xa87ef4bd,
+	0x0006bb7e,
+	0xac7ef4bd,
 	0x11f40006,
 	0x40019810,
 	0xf40511fd,
-	0x2e7e070b,
-/* 0x0962: ctx_xfer_no_post_mmio */
-/* 0x0962: ctx_xfer_done */
+	0x327e070b,
+/* 0x0966: ctx_xfer_no_post_mmio */
+/* 0x0966: ctx_xfer_done */
 	0x00f80008,
 	0x00000000,
 	0x00000000,
@@ -912,5 +913,4 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
-	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 1f81069..c56a886 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -27,6 +27,8 @@
 
 #include <core/client.h>
 #include <core/option.h>
+#include <core/firmware.h>
+#include <subdev/secboot.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
 #include <subdev/pmu.h>
@@ -1427,21 +1429,40 @@
 	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
+	struct nvkm_secboot *sb = device->secboot;
 	int i;
 
 	if (gr->firmware) {
 		/* load fuc microcode */
 		nvkm_mc_unk260(device->mc, 0);
-		gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
-		gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
+
+		/* securely managed falcons must be reset using secure boot */
+		if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
+			nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+		else
+			gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
+					 &gr->fuc409d);
+		if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
+			nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+		else
+			gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
+					 &gr->fuc41ad);
+
 		nvkm_mc_unk260(device->mc, 1);
 
 		/* start both of them running */
 		nvkm_wr32(device, 0x409840, 0xffffffff);
 		nvkm_wr32(device, 0x41a10c, 0x00000000);
 		nvkm_wr32(device, 0x40910c, 0x00000000);
-		nvkm_wr32(device, 0x41a100, 0x00000002);
-		nvkm_wr32(device, 0x409100, 0x00000002);
+
+		if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
+			nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_GPCCS);
+		else
+			nvkm_wr32(device, 0x41a100, 0x00000002);
+		if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
+			nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_FECS);
+		else
+			nvkm_wr32(device, 0x409100, 0x00000002);
 		if (nvkm_msec(device, 2000,
 			if (nvkm_rd32(device, 0x409800) & 0x00000001)
 				break;
@@ -1683,6 +1704,12 @@
 	fuc->data = NULL;
 }
 
+static void
+gf100_gr_dtor_init(struct gf100_gr_pack *pack)
+{
+	vfree(pack);
+}
+
 void *
 gf100_gr_dtor(struct nvkm_gr *base)
 {
@@ -1697,6 +1724,11 @@
 	gf100_gr_dtor_fw(&gr->fuc41ac);
 	gf100_gr_dtor_fw(&gr->fuc41ad);
 
+	gf100_gr_dtor_init(gr->fuc_bundle);
+	gf100_gr_dtor_init(gr->fuc_method);
+	gf100_gr_dtor_init(gr->fuc_sw_ctx);
+	gf100_gr_dtor_init(gr->fuc_sw_nonctx);
+
 	nvkm_memory_del(&gr->unk4188b8);
 	nvkm_memory_del(&gr->unk4188b4);
 	return gr;
@@ -1720,22 +1752,9 @@
 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	const struct firmware *fw;
-	char f[64];
-	char cname[16];
 	int ret;
-	int i;
 
-	/* Convert device name to lowercase */
-	strncpy(cname, device->chip->name, sizeof(cname));
-	cname[sizeof(cname) - 1] = '\0';
-	i = strlen(cname);
-	while (i) {
-		--i;
-		cname[i] = tolower(cname[i]);
-	}
-
-	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
-	ret = request_firmware(&fw, f, device->dev);
+	ret = nvkm_firmware_get(device, fwname, &fw);
 	if (ret) {
 		nvkm_error(subdev, "failed to load %s\n", fwname);
 		return ret;
@@ -1743,7 +1762,7 @@
 
 	fuc->size = fw->size;
 	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-	release_firmware(fw);
+	nvkm_firmware_put(fw);
 	return (fuc->data != NULL) ? 0 : -ENOMEM;
 }
 
@@ -1763,15 +1782,6 @@
 	if (ret)
 		return ret;
 
-	if (gr->firmware) {
-		nvkm_info(&gr->base.engine.subdev, "using external firmware\n");
-		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
-		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
-		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
-		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
-			return -ENODEV;
-	}
-
 	return 0;
 }
 
@@ -1780,10 +1790,25 @@
 	      int index, struct nvkm_gr **pgr)
 {
 	struct gf100_gr *gr;
+	int ret;
+
 	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
 		return -ENOMEM;
 	*pgr = &gr->base;
-	return gf100_gr_ctor(func, device, index, gr);
+
+	ret = gf100_gr_ctor(func, device, index, gr);
+	if (ret)
+		return ret;
+
+	if (gr->firmware) {
+		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
+		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
+		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
+		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
+			return -ENODEV;
+	}
+
+	return 0;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 02e78b8..f0c6acb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -82,7 +82,7 @@
 
 	/*
 	 * Used if the register packs are loaded from NVIDIA fw instead of
-	 * using hardcoded arrays.
+	 * using hardcoded arrays. To be allocated with vzalloc().
 	 */
 	struct gf100_gr_pack *fuc_sw_nonctx;
 	struct gf100_gr_pack *fuc_sw_ctx;
@@ -138,12 +138,9 @@
 
 int gk104_gr_init(struct gf100_gr *);
 
-int gk20a_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
-		  int, struct nvkm_gr **);
-void gk20a_gr_dtor(struct gf100_gr *);
 int gk20a_gr_init(struct gf100_gr *);
 
-int gm204_gr_init(struct gf100_gr *);
+int gm200_gr_init(struct gf100_gr *);
 
 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
 
@@ -204,6 +201,17 @@
 void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
 int  gf100_gr_init_ctxctl(struct gf100_gr *);
 
+/* external bundle loading functions */
+int gk20a_gr_av_to_init(struct gf100_gr *, const char *,
+			struct gf100_gr_pack **);
+int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *,
+			 struct gf100_gr_pack **);
+int gk20a_gr_av_to_method(struct gf100_gr *, const char *,
+			  struct gf100_gr_pack **);
+
+int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int,
+		  struct nvkm_gr **);
+
 /* register init value lists */
 
 extern const struct gf100_gr_init gf100_gr_init_main_0[];
@@ -279,6 +287,4 @@
 extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
 extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
 void gm107_gr_init_bios(struct gf100_gr *);
-
-extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index b8758d3..7ffb8a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -26,37 +26,40 @@
 
 #include <nvif/class.h>
 
-static void
-gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
-{
-	vfree(pack);
-}
-
 struct gk20a_fw_av
 {
 	u32 addr;
 	u32 data;
 };
 
-static struct gf100_gr_pack *
-gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
+int
+gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
+		    struct gf100_gr_pack **ppack)
 {
+	struct gf100_gr_fuc fuc;
 	struct gf100_gr_init *init;
 	struct gf100_gr_pack *pack;
-	const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+	int nent;
+	int ret;
 	int i;
 
+	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+	if (ret)
+		return ret;
+
+	nent = (fuc.size / sizeof(struct gk20a_fw_av));
 	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
-	if (!pack)
-		return ERR_PTR(-ENOMEM);
+	if (!pack) {
+		ret = -ENOMEM;
+		goto end;
+	}
 
 	init = (void *)(pack + 2);
-
 	pack[0].init = init;
 
 	for (i = 0; i < nent; i++) {
 		struct gf100_gr_init *ent = &init[i];
-		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
 
 		ent->addr = av->addr;
 		ent->data = av->data;
@@ -64,7 +67,11 @@
 		ent->pitch = 1;
 	}
 
-	return pack;
+	*ppack = pack;
+
+end:
+	gf100_gr_dtor_fw(&fuc);
+	return ret;
 }
 
 struct gk20a_fw_aiv
@@ -74,25 +81,34 @@
 	u32 data;
 };
 
-static struct gf100_gr_pack *
-gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
+int
+gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
+		     struct gf100_gr_pack **ppack)
 {
+	struct gf100_gr_fuc fuc;
 	struct gf100_gr_init *init;
 	struct gf100_gr_pack *pack;
-	const int nent = (fuc->size / sizeof(struct gk20a_fw_aiv));
+	int nent;
+	int ret;
 	int i;
 
+	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+	if (ret)
+		return ret;
+
+	nent = (fuc.size / sizeof(struct gk20a_fw_aiv));
 	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
-	if (!pack)
-		return ERR_PTR(-ENOMEM);
+	if (!pack) {
+		ret = -ENOMEM;
+		goto end;
+	}
 
 	init = (void *)(pack + 2);
-
 	pack[0].init = init;
 
 	for (i = 0; i < nent; i++) {
 		struct gf100_gr_init *ent = &init[i];
-		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc->data)[i];
+		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i];
 
 		ent->addr = av->addr;
 		ent->data = av->data;
@@ -100,30 +116,45 @@
 		ent->pitch = 1;
 	}
 
-	return pack;
+	*ppack = pack;
+
+end:
+	gf100_gr_dtor_fw(&fuc);
+	return ret;
 }
 
-static struct gf100_gr_pack *
-gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
+int
+gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
+		      struct gf100_gr_pack **ppack)
 {
+	struct gf100_gr_fuc fuc;
 	struct gf100_gr_init *init;
 	struct gf100_gr_pack *pack;
 	/* We don't expect to initialize more than 16 classes here... */
 	static const unsigned int max_classes = 16;
-	const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
-	int i, classidx = 0;
-	u32 prevclass = 0;
+	u32 classidx = 0, prevclass = 0;
+	int nent;
+	int ret;
+	int i;
+
+	ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+	if (ret)
+		return ret;
+
+	nent = (fuc.size / sizeof(struct gk20a_fw_av));
 
 	pack = vzalloc((sizeof(*pack) * max_classes) +
 		       (sizeof(*init) * (nent + 1)));
-	if (!pack)
-		return ERR_PTR(-ENOMEM);
+	if (!pack) {
+		ret = -ENOMEM;
+		goto end;
+	}
 
 	init = (void *)(pack + max_classes);
 
 	for (i = 0; i < nent; i++) {
 		struct gf100_gr_init *ent = &init[i];
-		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
 		u32 class = av->addr & 0xffff;
 		u32 addr = (av->addr & 0xffff0000) >> 14;
 
@@ -133,7 +164,8 @@
 			prevclass = class;
 			if (++classidx >= max_classes) {
 				vfree(pack);
-				return ERR_PTR(-ENOSPC);
+				ret = -ENOSPC;
+				goto end;
 			}
 		}
 
@@ -143,7 +175,11 @@
 		ent->pitch = 1;
 	}
 
-	return pack;
+	*ppack = pack;
+
+end:
+	gf100_gr_dtor_fw(&fuc);
+	return ret;
 }
 
 static int
@@ -273,69 +309,8 @@
 	return gf100_gr_init_ctxctl(gr);
 }
 
-void
-gk20a_gr_dtor(struct gf100_gr *gr)
-{
-	gk20a_gr_init_dtor(gr->fuc_method);
-	gk20a_gr_init_dtor(gr->fuc_bundle);
-	gk20a_gr_init_dtor(gr->fuc_sw_ctx);
-	gk20a_gr_init_dtor(gr->fuc_sw_nonctx);
-}
-
-int
-gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
-	      int index, struct nvkm_gr **pgr)
-{
-	struct gf100_gr_fuc fuc;
-	struct gf100_gr *gr;
-	int ret;
-
-	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
-		return -ENOMEM;
-	*pgr = &gr->base;
-
-	ret = gf100_gr_ctor(func, device, index, gr);
-	if (ret)
-		return ret;
-
-	ret = gf100_gr_ctor_fw(gr, "sw_nonctx", &fuc);
-	if (ret)
-		return ret;
-	gr->fuc_sw_nonctx = gk20a_gr_av_to_init(&fuc);
-	gf100_gr_dtor_fw(&fuc);
-	if (IS_ERR(gr->fuc_sw_nonctx))
-		return PTR_ERR(gr->fuc_sw_nonctx);
-
-	ret = gf100_gr_ctor_fw(gr, "sw_ctx", &fuc);
-	if (ret)
-		return ret;
-	gr->fuc_sw_ctx = gk20a_gr_aiv_to_init(&fuc);
-	gf100_gr_dtor_fw(&fuc);
-	if (IS_ERR(gr->fuc_sw_ctx))
-		return PTR_ERR(gr->fuc_sw_ctx);
-
-	ret = gf100_gr_ctor_fw(gr, "sw_bundle_init", &fuc);
-	if (ret)
-		return ret;
-	gr->fuc_bundle = gk20a_gr_av_to_init(&fuc);
-	gf100_gr_dtor_fw(&fuc);
-	if (IS_ERR(gr->fuc_bundle))
-		return PTR_ERR(gr->fuc_bundle);
-
-	ret = gf100_gr_ctor_fw(gr, "sw_method_init", &fuc);
-	if (ret)
-		return ret;
-	gr->fuc_method = gk20a_gr_av_to_method(&fuc);
-	gf100_gr_dtor_fw(&fuc);
-	if (IS_ERR(gr->fuc_method))
-		return PTR_ERR(gr->fuc_method);
-
-	return 0;
-}
-
 static const struct gf100_gr_func
 gk20a_gr = {
-	.dtor = gk20a_gr_dtor,
 	.init = gk20a_gr_init,
 	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
 	.ppc_nr = 1,
@@ -352,5 +327,39 @@
 int
 gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gk20a_gr_new_(&gk20a_gr, device, index, pgr);
+	struct gf100_gr *gr;
+	int ret;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	ret = gf100_gr_ctor(&gk20a_gr, device, index, gr);
+	if (ret)
+		return ret;
+
+	if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
+	    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
+	    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
+	    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
+		return -ENODEV;
+
+	ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx);
+	if (ret)
+		return ret;
+
+	ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx);
+	if (ret)
+		return ret;
+
+	ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle);
+	if (ret)
+		return ret;
+
+	ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method);
+	if (ret)
+		return ret;
+
+	return 0;
 }
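
The packing helpers above now own the whole firmware fetch/release cycle, so each step of gk20a_gr_new() collapses to a single call that fills a gf100_gr_pack. A self-contained sketch of what an "av" (address/value) blob decodes to; only the gk20a_fw_av layout comes from this patch, the rest is an illustrative stand-in for the kernel helpers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct gk20a_fw_av {
	uint32_t addr;
	uint32_t data;
};

/* Decode an "av" firmware blob into (register, value) pairs, the
 * same walk gk20a_gr_av_to_init() performs over fuc.data.
 */
static void decode_av(const void *fw, size_t size)
{
	const struct gk20a_fw_av *av = fw;
	size_t i, nent = size / sizeof(*av);

	for (i = 0; i < nent; i++)
		printf("mmio 0x%08x <- 0x%08x\n",
		       (unsigned)av[i].addr, (unsigned)av[i].data);
}
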
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
new file mode 100644
index 0000000..058fc1d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <subdev/secboot.h>
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+int
+gm200_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {}, tmp;
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc, ppc, rop;
+	int i;
+
+	tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
+	nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
+	nvkm_wr32(device, 0x418890, 0x00000000);
+	nvkm_wr32(device, 0x418894, 0x00000000);
+	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
+
+	/*XXX: belongs in fb */
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
+
+	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+	gm107_gr_init_bios(gr);
+
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+
+	memset(data, 0x00, sizeof(data));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+	nvkm_wr32(device, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
+	nvkm_wr32(device, 0x409c24, 0x000e0000);
+	nvkm_wr32(device, 0x405848, 0xc0000000);
+	nvkm_wr32(device, 0x40584c, 0x00000001);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
+			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+		}
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+	}
+
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
+	}
+
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
+
+	nvkm_wr32(device, 0x400054, 0x2c350f63);
+
+	gf100_gr_zbc_init(gr);
+
+	return gf100_gr_init_ctxctl(gr);
+}
+
+int
+gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_gr **pgr)
+{
+	struct gf100_gr *gr;
+	int ret;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	ret = gf100_gr_ctor(func, device, index, gr);
+	if (ret)
+		return ret;
+
+	/* Load firmwares for non-secure falcons */
+	if (!nvkm_secboot_is_managed(device->secboot,
+				     NVKM_SECBOOT_FALCON_FECS)) {
+		if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
+		    (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
+			return ret;
+	}
+	if (!nvkm_secboot_is_managed(device->secboot,
+				     NVKM_SECBOOT_FALCON_GPCCS)) {
+		if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
+		    (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
+			return ret;
+	}
+
+	if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
+	    (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
+	    (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
+	    (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
+		return ret;
+
+	return 0;
+}
+
+static const struct gf100_gr_func
+gm200_gr = {
+	.init = gm200_gr_init,
+	.ppc_nr = 2,
+	.grctx = &gm200_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gm200_gr_new_(&gm200_gr, device, index, pgr);
+}
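
Note the gating in gm200_gr_new_(): external FECS/GPCCS images are only requested when the secure boot subdev does not manage that falcon, since managed falcons receive signed firmware through secboot instead. A condensed, self-contained sketch of the pattern; the type and function names here are stand-ins for the nvkm helpers used above:

#include <stdbool.h>
#include <stdio.h>

enum falcon { FALCON_FECS, FALCON_GPCCS };

/* Stand-in for nvkm_secboot_is_managed(); example policy only. */
static bool secboot_is_managed(enum falcon f)
{
	return f == FALCON_FECS;
}

/* Stand-in for gf100_gr_ctor_fw(). */
static int load_external_fw(const char *name)
{
	printf("request_firmware(%s)\n", name);
	return 0;
}

static int load_ctxsw(void)
{
	if (!secboot_is_managed(FALCON_GPCCS)) {
		int ret;
		/* falcon not secboot-managed: fetch external images */
		if ((ret = load_external_fw("gr/gpccs_inst")) ||
		    (ret = load_external_fw("gr/gpccs_data")))
			return ret;
	}
	return 0;	/* managed falcons get signed images via secboot */
}
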
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
deleted file mode 100644
index 90381dd..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "gf100.h"
-#include "ctxgf100.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * PGRAPH register lists
- ******************************************************************************/
-
-static const struct gf100_gr_init
-gm204_gr_init_main_0[] = {
-	{ 0x400080,   1, 0x04, 0x003003e2 },
-	{ 0x400088,   1, 0x04, 0xe007bfe7 },
-	{ 0x40008c,   1, 0x04, 0x00060000 },
-	{ 0x400090,   1, 0x04, 0x00000030 },
-	{ 0x40013c,   1, 0x04, 0x003901f3 },
-	{ 0x400140,   1, 0x04, 0x00000100 },
-	{ 0x400144,   1, 0x04, 0x00000000 },
-	{ 0x400148,   1, 0x04, 0x00000110 },
-	{ 0x400138,   1, 0x04, 0x00000000 },
-	{ 0x400130,   2, 0x04, 0x00000000 },
-	{ 0x400124,   1, 0x04, 0x00000002 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_fe_0[] = {
-	{ 0x40415c,   1, 0x04, 0x00000000 },
-	{ 0x404170,   1, 0x04, 0x00000000 },
-	{ 0x4041b4,   1, 0x04, 0x00000000 },
-	{ 0x4041b8,   1, 0x04, 0x00000010 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_ds_0[] = {
-	{ 0x40583c,   1, 0x04, 0x00000000 },
-	{ 0x405844,   1, 0x04, 0x00ffffff },
-	{ 0x40584c,   1, 0x04, 0x00000001 },
-	{ 0x405850,   1, 0x04, 0x00000000 },
-	{ 0x405900,   1, 0x04, 0x00000000 },
-	{ 0x405908,   1, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_sked_0[] = {
-	{ 0x407010,   1, 0x04, 0x00000000 },
-	{ 0x407040,   1, 0x04, 0x80440434 },
-	{ 0x407048,   1, 0x04, 0x00000008 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_tpccs_0[] = {
-	{ 0x419d60,   1, 0x04, 0x0000003f },
-	{ 0x419d88,   3, 0x04, 0x00000000 },
-	{ 0x419dc4,   1, 0x04, 0x00000000 },
-	{ 0x419dc8,   1, 0x04, 0x00000501 },
-	{ 0x419dd0,   1, 0x04, 0x00000000 },
-	{ 0x419dd4,   1, 0x04, 0x00000100 },
-	{ 0x419dd8,   1, 0x04, 0x00000001 },
-	{ 0x419ddc,   1, 0x04, 0x00000002 },
-	{ 0x419de0,   1, 0x04, 0x00000001 },
-	{ 0x419de8,   1, 0x04, 0x000000cc },
-	{ 0x419dec,   1, 0x04, 0x00000000 },
-	{ 0x419df0,   1, 0x04, 0x000000cc },
-	{ 0x419df4,   1, 0x04, 0x00000000 },
-	{ 0x419d0c,   1, 0x04, 0x00000000 },
-	{ 0x419d10,   1, 0x04, 0x00000014 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_pe_0[] = {
-	{ 0x419900,   1, 0x04, 0x000000ff },
-	{ 0x419810,   1, 0x04, 0x00000000 },
-	{ 0x41980c,   1, 0x04, 0x00000010 },
-	{ 0x419844,   1, 0x04, 0x00000000 },
-	{ 0x419838,   1, 0x04, 0x000000ff },
-	{ 0x419850,   1, 0x04, 0x00000004 },
-	{ 0x419854,   2, 0x04, 0x00000000 },
-	{ 0x419894,   3, 0x04, 0x00100401 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_sm_0[] = {
-	{ 0x419e30,   1, 0x04, 0x000000ff },
-	{ 0x419e00,   1, 0x04, 0x00000000 },
-	{ 0x419ea0,   1, 0x04, 0x00000000 },
-	{ 0x419ee4,   1, 0x04, 0x00000000 },
-	{ 0x419ea4,   1, 0x04, 0x00000100 },
-	{ 0x419ea8,   1, 0x04, 0x00000000 },
-	{ 0x419ee8,   1, 0x04, 0x00000091 },
-	{ 0x419eb4,   1, 0x04, 0x00000000 },
-	{ 0x419ebc,   2, 0x04, 0x00000000 },
-	{ 0x419edc,   1, 0x04, 0x000c1810 },
-	{ 0x419ed8,   1, 0x04, 0x00000000 },
-	{ 0x419ee0,   1, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_l1c_1[] = {
-	{ 0x419cf8,   2, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_sm_1[] = {
-	{ 0x419f74,   1, 0x04, 0x00055155 },
-	{ 0x419f80,   4, 0x04, 0x00000000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_l1c_2[] = {
-	{ 0x419ccc,   2, 0x04, 0x00000000 },
-	{ 0x419c80,   1, 0x04, 0x3f006022 },
-	{ 0x419c88,   1, 0x04, 0x00210000 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_pes_0[] = {
-	{ 0x41be50,   1, 0x04, 0x000000ff },
-	{ 0x41be04,   1, 0x04, 0x00000000 },
-	{ 0x41be08,   1, 0x04, 0x00000004 },
-	{ 0x41be0c,   1, 0x04, 0x00000008 },
-	{ 0x41be10,   1, 0x04, 0x2e3b8bc7 },
-	{ 0x41be14,   2, 0x04, 0x00000000 },
-	{ 0x41be3c,   5, 0x04, 0x00100401 },
-	{}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_be_0[] = {
-	{ 0x408890,   1, 0x04, 0x000000ff },
-	{ 0x40880c,   1, 0x04, 0x00000000 },
-	{ 0x408850,   1, 0x04, 0x00000004 },
-	{ 0x408878,   1, 0x04, 0x01b4201c },
-	{ 0x40887c,   1, 0x04, 0x80004c55 },
-	{ 0x408880,   1, 0x04, 0x0018c258 },
-	{ 0x408884,   1, 0x04, 0x0000160f },
-	{ 0x408974,   1, 0x04, 0x000000ff },
-	{ 0x408910,   9, 0x04, 0x00000000 },
-	{ 0x408950,   1, 0x04, 0x00000000 },
-	{ 0x408954,   1, 0x04, 0x0000ffff },
-	{ 0x408958,   1, 0x04, 0x00000034 },
-	{ 0x40895c,   1, 0x04, 0x84b17403 },
-	{ 0x408960,   1, 0x04, 0x04c1884f },
-	{ 0x408964,   1, 0x04, 0x04714445 },
-	{ 0x408968,   1, 0x04, 0x0280802f },
-	{ 0x40896c,   1, 0x04, 0x04304856 },
-	{ 0x408970,   1, 0x04, 0x00012800 },
-	{ 0x408984,   1, 0x04, 0x00000000 },
-	{ 0x408988,   1, 0x04, 0x08040201 },
-	{ 0x40898c,   1, 0x04, 0x80402010 },
-	{}
-};
-
-const struct gf100_gr_pack
-gm204_gr_pack_mmio[] = {
-	{ gm204_gr_init_main_0 },
-	{ gm204_gr_init_fe_0 },
-	{ gf100_gr_init_pri_0 },
-	{ gf100_gr_init_rstr2d_0 },
-	{ gf100_gr_init_pd_0 },
-	{ gm204_gr_init_ds_0 },
-	{ gm107_gr_init_scc_0 },
-	{ gm204_gr_init_sked_0 },
-	{ gk110_gr_init_cwd_0 },
-	{ gm107_gr_init_prop_0 },
-	{ gk208_gr_init_gpc_unk_0 },
-	{ gf100_gr_init_setup_0 },
-	{ gf100_gr_init_crstr_0 },
-	{ gm107_gr_init_setup_1 },
-	{ gm107_gr_init_zcull_0 },
-	{ gf100_gr_init_gpm_0 },
-	{ gm107_gr_init_gpc_unk_1 },
-	{ gf100_gr_init_gcc_0 },
-	{ gm204_gr_init_tpccs_0 },
-	{ gm107_gr_init_tex_0 },
-	{ gm204_gr_init_pe_0 },
-	{ gm107_gr_init_l1c_0 },
-	{ gf100_gr_init_mpc_0 },
-	{ gm204_gr_init_sm_0 },
-	{ gm204_gr_init_l1c_1 },
-	{ gm204_gr_init_sm_1 },
-	{ gm204_gr_init_l1c_2 },
-	{ gm204_gr_init_pes_0 },
-	{ gm107_gr_init_wwdx_0 },
-	{ gm107_gr_init_cbm_0 },
-	{ gm204_gr_init_be_0 },
-	{}
-};
-
-const struct gf100_gr_pack *
-gm204_gr_data[] = {
-	gm204_gr_pack_mmio,
-	NULL
-};
-
-/*******************************************************************************
- * PGRAPH engine/subdev functions
- ******************************************************************************/
-
-static int
-gm204_gr_init_ctxctl(struct gf100_gr *gr)
-{
-	return 0;
-}
-
-int
-gm204_gr_init(struct gf100_gr *gr)
-{
-	struct nvkm_device *device = gr->base.engine.subdev.device;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
-	u32 data[TPC_MAX / 8] = {}, tmp;
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc, ppc, rop;
-	int i;
-
-	tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
-	nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
-	nvkm_wr32(device, 0x418890, 0x00000000);
-	nvkm_wr32(device, 0x418894, 0x00000000);
-	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
-	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
-	nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
-
-	/*XXX: belongs in fb */
-	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
-	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
-	nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
-
-	gf100_gr_mmio(gr, gr->func->mmio);
-
-	gm107_gr_init_bios(gr);
-
-	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
-
-	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % gr->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
-	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
-	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
-	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
-			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			gr->tpc_total);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
-	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
-	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
-
-	nvkm_wr32(device, 0x400500, 0x00010001);
-	nvkm_wr32(device, 0x400100, 0xffffffff);
-	nvkm_wr32(device, 0x40013c, 0xffffffff);
-	nvkm_wr32(device, 0x400124, 0x00000002);
-	nvkm_wr32(device, 0x409c24, 0x000e0000);
-	nvkm_wr32(device, 0x405848, 0xc0000000);
-	nvkm_wr32(device, 0x40584c, 0x00000001);
-	nvkm_wr32(device, 0x404000, 0xc0000000);
-	nvkm_wr32(device, 0x404600, 0xc0000000);
-	nvkm_wr32(device, 0x408030, 0xc0000000);
-	nvkm_wr32(device, 0x404490, 0xc0000000);
-	nvkm_wr32(device, 0x406018, 0xc0000000);
-	nvkm_wr32(device, 0x407020, 0x40000000);
-	nvkm_wr32(device, 0x405840, 0xc0000000);
-	nvkm_wr32(device, 0x405844, 0x00ffffff);
-	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
-
-	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
-			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
-		}
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
-	}
-
-	for (rop = 0; rop < gr->rop_nr; rop++) {
-		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
-	}
-
-	nvkm_wr32(device, 0x400108, 0xffffffff);
-	nvkm_wr32(device, 0x400138, 0xffffffff);
-	nvkm_wr32(device, 0x400118, 0xffffffff);
-	nvkm_wr32(device, 0x400130, 0xffffffff);
-	nvkm_wr32(device, 0x40011c, 0xffffffff);
-	nvkm_wr32(device, 0x400134, 0xffffffff);
-
-	nvkm_wr32(device, 0x400054, 0x2c350f63);
-
-	gf100_gr_zbc_init(gr);
-
-	return gm204_gr_init_ctxctl(gr);
-}
-
-static const struct gf100_gr_func
-gm204_gr = {
-	.init = gm204_gr_init,
-	.mmio = gm204_gr_pack_mmio,
-	.ppc_nr = 2,
-	.grctx = &gm204_grctx,
-	.sclass = {
-		{ -1, -1, FERMI_TWOD_A },
-		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
-		{ -1, -1, MAXWELL_B, &gf100_fermi },
-		{ -1, -1, MAXWELL_COMPUTE_B },
-		{}
-	}
-};
-
-int
-gm204_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
-{
-	return gf100_gr_new_(&gm204_gr, device, index, pgr);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
deleted file mode 100644
index 341dc560..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "gf100.h"
-#include "ctxgf100.h"
-
-#include <nvif/class.h>
-
-static const struct gf100_gr_func
-gm206_gr = {
-	.init = gm204_gr_init,
-	.mmio = gm204_gr_pack_mmio,
-	.ppc_nr = 2,
-	.grctx = &gm206_grctx,
-	.sclass = {
-		{ -1, -1, FERMI_TWOD_A },
-		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
-		{ -1, -1, MAXWELL_B, &gf100_fermi },
-		{ -1, -1, MAXWELL_COMPUTE_B },
-		{}
-	}
-};
-
-int
-gm206_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
-{
-	return gf100_gr_new_(&gm206_gr, device, index, pgr);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 65b6e3d..29732bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -32,12 +32,15 @@
 	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 val;
 
-	/* TODO this needs to be removed once secure boot works */
-	if (1) {
+	/* Bypass MMU check for non-secure boot */
+	if (!device->secboot) {
 		nvkm_wr32(device, 0x100ce4, 0xffffffff);
+
+		if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)
+			nvdev_warn(device,
+			  "cannot bypass secure boot - expect failure soon!\n");
 	}
 
-	/* TODO update once secure boot works */
 	val = nvkm_rd32(device, 0x100c80);
 	val &= 0xf000087f;
 	nvkm_wr32(device, 0x418880, val);
@@ -61,7 +64,6 @@
 
 static const struct gf100_gr_func
 gm20b_gr = {
-	.dtor = gk20a_gr_dtor,
 	.init = gk20a_gr_init,
 	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
 	.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
@@ -79,5 +81,5 @@
 int
 gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
 {
-	return gk20a_gr_new_(&gm20b_gr, device, index, pgr);
+	return gm200_gr_new_(&gm20b_gr, device, index, pgr);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild
new file mode 100644
index 0000000..b511956
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild
@@ -0,0 +1 @@
+#nvkm-y += nvkm/engine/msenc/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
new file mode 100644
index 0000000..13b7c71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -0,0 +1 @@
+#nvkm-y += nvkm/engine/nvdec/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
new file mode 100644
index 0000000..ad8f182
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -0,0 +1 @@
+#nvkm-y += nvkm/engine/nvenc/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index 4bef72a..3fda594 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -59,9 +59,11 @@
 nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
 {
 	struct nvkm_device *device = pm->engine.subdev.device;
-	if (pm->sequence != pm->sequence) {
+	struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
+
+	if (nv40pm->sequence != pm->sequence) {
 		nvkm_wr32(device, 0x400084, 0x00000020);
-		pm->sequence = pm->sequence;
+		nv40pm->sequence = pm->sequence;
 	}
 }
 
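The bug fixed above is that pm->sequence was compared against itself, so the counter-reset path could never fire; the fix recovers the nv40_pm wrapper with container_of() and compares its cached copy instead. A minimal, self-contained sketch of that pattern, with the structs trimmed to the members the fix touches:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_pm { int sequence; };			/* embedded base */
struct nv40_pm { struct nvkm_pm base; int sequence; };	/* wrapper */

/* Recover the wrapper from a pointer to its embedded base member. */
static struct nv40_pm *to_nv40_pm(struct nvkm_pm *pm)
{
	return container_of(pm, struct nv40_pm, base);
}
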
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild
new file mode 100644
index 0000000..ed4fb64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild
@@ -0,0 +1 @@
+#nvkm-y += nvkm/engine/vic/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index ee2c38f..642d27d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -8,6 +8,7 @@
 include $(src)/nvkm/subdev/gpio/Kbuild
 include $(src)/nvkm/subdev/i2c/Kbuild
 include $(src)/nvkm/subdev/ibus/Kbuild
+include $(src)/nvkm/subdev/iccsense/Kbuild
 include $(src)/nvkm/subdev/instmem/Kbuild
 include $(src)/nvkm/subdev/ltc/Kbuild
 include $(src)/nvkm/subdev/mc/Kbuild
@@ -15,6 +16,7 @@
 include $(src)/nvkm/subdev/mxm/Kbuild
 include $(src)/nvkm/subdev/pci/Kbuild
 include $(src)/nvkm/subdev/pmu/Kbuild
+include $(src)/nvkm/subdev/secboot/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
 include $(src)/nvkm/subdev/volt/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index 64730d5..dbcb0ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -10,6 +10,7 @@
 nvkm-y += nvkm/subdev/bios/fan.o
 nvkm-y += nvkm/subdev/bios/gpio.o
 nvkm-y += nvkm/subdev/bios/i2c.o
+nvkm-y += nvkm/subdev/bios/iccsense.o
 nvkm-y += nvkm/subdev/bios/image.o
 nvkm-y += nvkm/subdev/bios/init.o
 nvkm-y += nvkm/subdev/bios/mxm.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index c9e6f6f..b857835 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -32,7 +32,7 @@
 	u16 dcb, extdev = 0;
 
 	dcb = dcb_table(bios, &dcb_ver, &dcb_hdr, &dcb_cnt, &dcb_len);
-	if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
+	if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40 && dcb_ver != 0x41))
 		return 0x0000;
 
 	extdev = nvbios_rd16(bios, dcb + 18);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
new file mode 100644
index 0000000..0843280
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/iccsense.h>
+
+static u16
+nvbios_iccsense_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
+		      u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 iccsense;
+
+	if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
+	    bit_P.length < 0x2c)
+		return 0;
+
+	iccsense = nvbios_rd16(bios, bit_P.offset + 0x28);
+	if (!iccsense)
+		return 0;
+
+	*ver = nvbios_rd08(bios, iccsense + 0);
+	switch (*ver) {
+	case 0x10:
+	case 0x20:
+		*hdr = nvbios_rd08(bios, iccsense + 1);
+		*len = nvbios_rd08(bios, iccsense + 2);
+		*cnt = nvbios_rd08(bios, iccsense + 3);
+		return iccsense;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int
+nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
+{
+	struct nvkm_subdev *subdev = &bios->subdev;
+	u8 ver, hdr, cnt, len, i;
+	u16 table, entry;
+
+	table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len);
+	if (!table || !cnt)
+		return -EINVAL;
+
+	if (ver != 0x10 && ver != 0x20) {
+		nvkm_error(subdev, "ICCSENSE version 0x%02x unknown\n", ver);
+		return -EINVAL;
+	}
+
+	iccsense->nr_entry = cnt;
+	iccsense->rail = kmalloc(sizeof(struct pwr_rail_t) * cnt, GFP_KERNEL);
+	if (!iccsense->rail)
+		return -ENOMEM;
+
+	for (i = 0; i < cnt; ++i) {
+		struct pwr_rail_t *rail = &iccsense->rail[i];
+		entry = table + hdr + i * len;
+
+		switch (ver) {
+		case 0x10:
+			rail->mode = nvbios_rd08(bios, entry + 0x1);
+			rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
+			rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3);
+			rail->rail = nvbios_rd08(bios, entry + 0x4);
+			break;
+		case 0x20:
+			rail->mode = nvbios_rd08(bios, entry);
+			rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
+			rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5);
+			rail->rail = nvbios_rd08(bios, entry + 0x6);
+			break;
+		}
+	}
+
+	return 0;
+}
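
For reference, the per-entry byte offsets the two switch arms above read, relative to the start of each table entry (resistor values are in milliohms):

	field          v0x10  v0x20
	mode           +0x1   +0x0
	extdev_id      +0x2   +0x1
	resistor_mohm  +0x3   +0x5
	rail           +0x4   +0x6
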
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index a7d69ce..38ed09f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -786,20 +786,20 @@
 }
 
 /**
- * INIT_DP_CONDITION - opcode 0x3a
+ * INIT_GENERIC_CONDITION - opcode 0x3a
  *
  */
 static void
-init_dp_condition(struct nvbios_init *init)
+init_generic_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
 	struct nvbios_dpout info;
 	u8  cond = nvbios_rd08(bios, init->offset + 1);
-	u8  unkn = nvbios_rd08(bios, init->offset + 2);
+	u8  size = nvbios_rd08(bios, init->offset + 2);
 	u8  ver, hdr, cnt, len;
 	u16 data;
 
-	trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
+	trace("GENERIC_CONDITION\t0x%02x 0x%02x\n", cond, size);
 	init->offset += 3;
 
 	switch (cond) {
@@ -828,7 +828,8 @@
 			init_exec_set(init, false);
 		break;
 	default:
-		warn("unknown dp condition 0x%02x\n", cond);
+		warn("INIT_GENERIC_CONDITION: unknown 0x%02x\n", cond);
+		init->offset += size;
 		break;
 	}
 }
@@ -2205,7 +2206,7 @@
 	[0x37] = { init_copy },
 	[0x38] = { init_not },
 	[0x39] = { init_io_flag_condition },
-	[0x3a] = { init_dp_condition },
+	[0x3a] = { init_generic_condition },
 	[0x3b] = { init_io_mask_or },
 	[0x3c] = { init_io_or },
 	[0x47] = { init_andn_reg },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index ed7717b..87d9488 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -8,6 +8,7 @@
 nvkm-y += nvkm/subdev/clk/gf100.o
 nvkm-y += nvkm/subdev/clk/gk104.o
 nvkm-y += nvkm/subdev/clk/gk20a.o
+nvkm-y += nvkm/subdev/clk/gm20b.o
 
 nvkm-y += nvkm/subdev/clk/pllnv04.o
 nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 5da2aa8..5f0ee24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,19 +22,17 @@
  * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
  *
  */
-#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
 #include "priv.h"
+#include "gk20a.h"
 
 #include <core/tegra.h>
 #include <subdev/timer.h>
 
-#define MHZ (1000 * 1000)
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
 
 #define MASK(w)	((1 << w) - 1)
 
-#define SYS_GPCPLL_CFG_BASE			0x00137000
-#define GPC_BCASE_GPCPLL_CFG_BASE		0x00132800
-
 #define GPCPLL_CFG		(SYS_GPCPLL_CFG_BASE + 0)
 #define GPCPLL_CFG_ENABLE	BIT(0)
 #define GPCPLL_CFG_IDDQ		BIT(1)
@@ -56,6 +54,7 @@
 #define GPCPLL_CFG3			(SYS_GPCPLL_CFG_BASE + 0x18)
 #define GPCPLL_CFG3_PLL_STEPB_SHIFT	16
 
+#define GPC_BCASE_GPCPLL_CFG_BASE		0x00132800
 #define GPCPLL_NDIV_SLOWDOWN			(SYS_GPCPLL_CFG_BASE + 0x1c)
 #define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT	0
 #define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT	8
@@ -75,7 +74,7 @@
 #define GPC2CLK_OUT_VCODIV1		0
 #define GPC2CLK_OUT_VCODIV_MASK		(MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
 					GPC2CLK_OUT_VCODIV_SHIFT)
-#define	GPC2CLK_OUT_BYPDIV_WIDTH	6
+#define GPC2CLK_OUT_BYPDIV_WIDTH	6
 #define GPC2CLK_OUT_BYPDIV_SHIFT	0
 #define GPC2CLK_OUT_BYPDIV31		0x3c
 #define GPC2CLK_OUT_INIT_MASK	((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
@@ -92,45 +91,49 @@
 #define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
 	    (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
 
-static const u8 pl_to_div[] = {
+static const u8 _pl_to_div[] = {
 /* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
 /* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
 };
 
-/* All frequencies in Mhz */
-struct gk20a_clk_pllg_params {
-	u32 min_vco, max_vco;
-	u32 min_u, max_u;
-	u32 min_m, max_m;
-	u32 min_n, max_n;
-	u32 min_pl, max_pl;
-};
+static u32 pl_to_div(u32 pl)
+{
+	if (pl >= ARRAY_SIZE(_pl_to_div))
+		return 1;
+
+	return _pl_to_div[pl];
+}
+
+static u32 div_to_pl(u32 div)
+{
+	u32 pl;
+
+	for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) {
+		if (_pl_to_div[pl] >= div)
+			return pl;
+	}
+
+	return ARRAY_SIZE(_pl_to_div) - 1;
+}
 
 static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
-	.min_vco = 1000, .max_vco = 2064,
-	.min_u = 12, .max_u = 38,
+	.min_vco = 1000000, .max_vco = 2064000,
+	.min_u = 12000, .max_u = 38000,
 	.min_m = 1, .max_m = 255,
 	.min_n = 8, .max_n = 255,
 	.min_pl = 1, .max_pl = 32,
 };
 
-struct gk20a_clk {
-	struct nvkm_clk base;
-	const struct gk20a_clk_pllg_params *params;
-	u32 m, n, pl;
-	u32 parent_rate;
-};
-
 static void
-gk20a_pllg_read_mnp(struct gk20a_clk *clk)
+gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
 {
 	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;
 
 	val = nvkm_rd32(device, GPCPLL_COEFF);
-	clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-	clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
-	clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+	pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+	pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+	pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 }
 
 static u32
@@ -139,8 +142,8 @@
 	u32 rate;
 	u32 divider;
 
-	rate = clk->parent_rate * clk->n;
-	divider = clk->m * pl_to_div[clk->pl];
+	rate = clk->parent_rate * clk->pll.n;
+	divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);
 
 	return rate / divider / 2;
 }
@@ -152,15 +155,13 @@
 	u32 target_clk_f, ref_clk_f, target_freq;
 	u32 min_vco_f, max_vco_f;
 	u32 low_pl, high_pl, best_pl;
-	u32 target_vco_f, vco_f;
+	u32 target_vco_f;
 	u32 best_m, best_n;
-	u32 u_f;
-	u32 m, n, n2;
-	u32 delta, lwv, best_delta = ~0;
+	u32 best_delta = ~0;
 	u32 pl;
 
-	target_clk_f = rate * 2 / MHZ;
-	ref_clk_f = clk->parent_rate / MHZ;
+	target_clk_f = rate * 2 / KHZ;
+	ref_clk_f = clk->parent_rate / KHZ;
 
 	max_vco_f = clk->params->max_vco;
 	min_vco_f = clk->params->min_vco;
@@ -176,33 +177,26 @@
 	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
 	high_pl = min(high_pl, clk->params->max_pl);
 	high_pl = max(high_pl, clk->params->min_pl);
+	high_pl = clk->div_to_pl(high_pl);
 
 	/* min_pl <= low_pl <= max_pl */
 	low_pl = min_vco_f / target_vco_f;
 	low_pl = min(low_pl, clk->params->max_pl);
 	low_pl = max(low_pl, clk->params->min_pl);
-
-	/* Find Indices of high_pl and low_pl */
-	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
-		if (pl_to_div[pl] >= low_pl) {
-			low_pl = pl;
-			break;
-		}
-	}
-	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
-		if (pl_to_div[pl] >= high_pl) {
-			high_pl = pl;
-			break;
-		}
-	}
+	low_pl = clk->div_to_pl(low_pl);
 
 	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
-		   pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+		   clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl));
 
 	/* Select lowest possible VCO */
 	for (pl = low_pl; pl <= high_pl; pl++) {
-		target_vco_f = target_clk_f * pl_to_div[pl];
+		u32 m, n, n2;
+
+		target_vco_f = target_clk_f * clk->pl_to_div(pl);
+
 		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
+			u32 u_f, vco_f;
+
 			u_f = ref_clk_f / m;
 
 			if (u_f < clk->params->min_u)
@@ -225,8 +219,10 @@
 				vco_f = ref_clk_f * n / m;
 
 				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
-					lwv = (vco_f + (pl_to_div[pl] / 2))
-						/ pl_to_div[pl];
+					u32 delta, lwv;
+
+					lwv = (vco_f + (clk->pl_to_div(pl) / 2))
+						/ clk->pl_to_div(pl);
 					delta = abs(lwv - target_clk_f);
 
 					if (delta < best_delta) {
@@ -249,17 +245,18 @@
 	if (best_delta != 0)
 		nvkm_debug(subdev,
 			   "no best match for target @ %dMHz on gpc_pll",
-			   target_clk_f);
+			   target_clk_f / KHZ);
 
-	clk->m = best_m;
-	clk->n = best_n;
-	clk->pl = best_pl;
+	clk->pll.m = best_m;
+	clk->pll.n = best_n;
+	clk->pll.pl = best_pl;
 
-	target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
+	target_freq = gk20a_pllg_calc_rate(clk);
 
 	nvkm_debug(subdev,
 		   "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
-		   target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
+		   target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
+		   clk->pl_to_div(clk->pll.pl));
 	return 0;
 }
 
@@ -323,17 +320,19 @@
 }
 
 static void
-_gk20a_pllg_enable(struct gk20a_clk *clk)
+gk20a_pllg_enable(struct gk20a_clk *clk)
 {
 	struct nvkm_device *device = clk->base.subdev.device;
+
 	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
 	nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static void
-_gk20a_pllg_disable(struct gk20a_clk *clk)
+gk20a_pllg_disable(struct gk20a_clk *clk)
 {
 	struct nvkm_device *device = clk->base.subdev.device;
+
 	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
 	nvkm_rd32(device, GPCPLL_CFG);
 }
@@ -344,25 +343,26 @@
 	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvkm_device *device = subdev->device;
 	u32 val, cfg;
-	u32 m_old, pl_old, n_lo;
+	struct gk20a_pll old_pll;
+	u32 n_lo;
 
 	/* get old coefficients */
-	val = nvkm_rd32(device, GPCPLL_COEFF);
-	m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-	pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+	gk20a_pllg_read_mnp(clk, &old_pll);
 
 	/* do NDIV slide if there is no change in M and PL */
 	cfg = nvkm_rd32(device, GPCPLL_CFG);
-	if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
-	    (cfg & GPCPLL_CFG_ENABLE)) {
-		return gk20a_pllg_slide(clk, clk->n);
+	if (allow_slide && clk->pll.m == old_pll.m &&
+	    clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
+		return gk20a_pllg_slide(clk, clk->pll.n);
 	}
 
 	/* slide down to NDIV_LO */
-	n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
-			    clk->parent_rate / MHZ);
 	if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
-		int ret = gk20a_pllg_slide(clk, n_lo);
+		int ret;
+
+		n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
+				    clk->parent_rate / KHZ);
+		ret = gk20a_pllg_slide(clk, n_lo);
 
 		if (ret)
 			return ret;
@@ -387,19 +387,19 @@
 		udelay(2);
 	}
 
-	_gk20a_pllg_disable(clk);
+	gk20a_pllg_disable(clk);
 
 	nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
-		   clk->m, clk->n, clk->pl);
+		   clk->pll.m, clk->pll.n, clk->pll.pl);
 
-	n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
-			    clk->parent_rate / MHZ);
-	val = clk->m << GPCPLL_COEFF_M_SHIFT;
-	val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
-	val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
+	n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
+			    clk->parent_rate / KHZ);
+	val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
+	val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
+	val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
 	nvkm_wr32(device, GPCPLL_COEFF, val);
 
-	_gk20a_pllg_enable(clk);
+	gk20a_pllg_enable(clk);
 
 	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
@@ -414,16 +414,24 @@
 		return -ETIMEDOUT;
 
 	/* switch to VCO mode */
-	nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 
 	/* restore out divider 1:1 */
 	val = nvkm_rd32(device, GPC2CLK_OUT);
-	val &= ~GPC2CLK_OUT_VCODIV_MASK;
-	udelay(2);
-	nvkm_wr32(device, GPC2CLK_OUT, val);
+	if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
+	    (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
+		val &= ~GPC2CLK_OUT_VCODIV_MASK;
+		val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
+		udelay(2);
+		nvkm_wr32(device, GPC2CLK_OUT, val);
+		/* Intentional second write to ensure linear divider operation */
+		nvkm_wr32(device, GPC2CLK_OUT, val);
+		nvkm_rd32(device, GPC2CLK_OUT);
+	}
 
 	/* slide up to new NDIV */
-	return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
+	return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
 }
 
 static int
@@ -438,32 +446,6 @@
 	return err;
 }
 
-static void
-gk20a_pllg_disable(struct gk20a_clk *clk)
-{
-	struct nvkm_device *device = clk->base.subdev.device;
-	u32 val;
-
-	/* slide to VCO min */
-	val = nvkm_rd32(device, GPCPLL_CFG);
-	if (val & GPCPLL_CFG_ENABLE) {
-		u32 coeff, m, n_lo;
-
-		coeff = nvkm_rd32(device, GPCPLL_COEFF);
-		m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-		n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
-				    clk->parent_rate / MHZ);
-		gk20a_pllg_slide(clk, n_lo);
-	}
-
-	/* put PLL in bypass before disabling it */
-	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
-
-	_gk20a_pllg_disable(clk);
-}
-
-#define GK20A_CLK_GPC_MDIV 1000
-
 static struct nvkm_pstate
 gk20a_pstates[] = {
 	{
@@ -558,7 +540,7 @@
 	},
 };
 
-static int
+int
 gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
 	struct gk20a_clk *clk = gk20a_clk(base);
@@ -569,7 +551,7 @@
 	case nv_clk_src_crystal:
 		return device->crystal;
 	case nv_clk_src_gpc:
-		gk20a_pllg_read_mnp(clk);
+		gk20a_pllg_read_mnp(clk, &clk->pll);
 		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
 	default:
 		nvkm_error(subdev, "invalid clock source %d\n", src);
@@ -577,7 +559,7 @@
 	}
 }
 
-static int
+int
 gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
 	struct gk20a_clk *clk = gk20a_clk(base);
@@ -586,7 +568,7 @@
 					 GK20A_CLK_GPC_MDIV);
 }
 
-static int
+int
 gk20a_clk_prog(struct nvkm_clk *base)
 {
 	struct gk20a_clk *clk = gk20a_clk(base);
@@ -594,15 +576,33 @@
 	return gk20a_pllg_program_mnp(clk);
 }
 
-static void
+void
 gk20a_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static void
+void
 gk20a_clk_fini(struct nvkm_clk *base)
 {
+	struct nvkm_device *device = base->subdev.device;
 	struct gk20a_clk *clk = gk20a_clk(base);
+	u32 val;
+
+	/* slide to VCO min */
+	val = nvkm_rd32(device, GPCPLL_CFG);
+	if (val & GPCPLL_CFG_ENABLE) {
+		struct gk20a_pll pll;
+		u32 n_lo;
+
+		gk20a_pllg_read_mnp(clk, &pll);
+		n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
+				    clk->parent_rate / KHZ);
+		gk20a_pllg_slide(clk, n_lo);
+	}
+
+	/* put PLL in bypass before disabling it */
+	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
 	gk20a_pllg_disable(clk);
 }
 
@@ -614,9 +614,12 @@
 	struct nvkm_device *device = subdev->device;
 	int ret;
 
-	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
+		  GPC2CLK_OUT_INIT_VAL);
 
-	ret = gk20a_clk_prog(&clk->base);
+	/* Start with lowest frequency */
+	base->func->calc(base, &base->func->pstates[0].base);
+	ret = base->func->prog(&clk->base);
 	if (ret) {
 		nvkm_error(subdev, "cannot initialize clock\n");
 		return ret;
@@ -643,27 +646,50 @@
 };
 
 int
-gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+_gk20a_clk_ctor(struct nvkm_device *device, int index,
+		const struct nvkm_clk_func *func,
+		const struct gk20a_clk_pllg_params *params,
+		struct gk20a_clk *clk)
 {
 	struct nvkm_device_tegra *tdev = device->func->tegra(device);
-	struct gk20a_clk *clk;
-	int ret, i;
+	int ret;
+	int i;
 
-	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+	/* Finish initializing the pstates */
+	for (i = 0; i < func->nr_pstates; i++) {
+		INIT_LIST_HEAD(&func->pstates[i].list);
+		func->pstates[i].pstate = i + 1;
+	}
+
+	clk->params = params;
+	clk->parent_rate = clk_get_rate(tdev->clk);
+
+	ret = nvkm_clk_ctor(func, device, index, true, &clk->base);
+	if (ret)
+		return ret;
+
+	nvkm_debug(&clk->base.subdev, "parent clock rate: %d kHz\n",
+		   clk->parent_rate / KHZ);
+
+	return 0;
+}
+
+int
+gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	struct gk20a_clk *clk;
+	int ret;
+
+	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+	if (!clk)
 		return -ENOMEM;
 	*pclk = &clk->base;
 
-	/* Finish initializing the pstates */
-	for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
-		INIT_LIST_HEAD(&gk20a_pstates[i].list);
-		gk20a_pstates[i].pstate = i + 1;
-	}
+	ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
+			      clk);
 
-	clk->params = &gk20a_pllg_params;
-	clk->parent_rate = clk_get_rate(tdev->clk);
+	clk->pl_to_div = pl_to_div;
+	clk->div_to_pl = div_to_pl;
 
-	ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
-	nvkm_info(&clk->base.subdev, "parent clock rate: %d Mhz\n",
-		  clk->parent_rate / MHZ);
 	return ret;
 }
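
With the bookkeeping moved from MHz to kHz, the rate computed by gk20a_pllg_calc_rate() follows rate = parent * N / (M * div(PL)) / 2, where the final /2 converts the PLL's gpc2clk output to the GPC clock. A quick worked example with plausible, purely illustrative coefficients:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t parent_khz = 38400;		/* 38.4 MHz reference, in kHz */
	uint32_t m = 1, n = 54, pl_div = 2;	/* example coefficients */

	/* rate = parent * N / (M * div(PL)), then /2 for gpcclk */
	uint32_t gpc2clk_khz = parent_khz * n / (m * pl_div);
	uint32_t gpcclk_khz = gpc2clk_khz / 2;

	printf("gpc2clk = %u kHz, gpcclk = %u kHz\n",
	       gpc2clk_khz, gpcclk_khz);	/* 1036800, 518400 */
	return 0;
}
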
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
new file mode 100644
index 0000000..13c4674
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NVKM_CLK_GK20A_H__
+#define __NVKM_CLK_GK20A_H__
+
+#define GK20A_CLK_GPC_MDIV 1000
+
+#define SYS_GPCPLL_CFG_BASE	0x00137000
+
+/* All frequencies in kHz */
+struct gk20a_clk_pllg_params {
+	u32 min_vco, max_vco;
+	u32 min_u, max_u;
+	u32 min_m, max_m;
+	u32 min_n, max_n;
+	u32 min_pl, max_pl;
+};
+
+struct gk20a_pll {
+	u32 m;
+	u32 n;
+	u32 pl;
+};
+
+struct gk20a_clk {
+	struct nvkm_clk base;
+	const struct gk20a_clk_pllg_params *params;
+	struct gk20a_pll pll;
+	u32 parent_rate;
+
+	u32 (*div_to_pl)(u32);
+	u32 (*pl_to_div)(u32);
+};
+#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
+
+int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
+		    const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
+void gk20a_clk_fini(struct nvkm_clk *);
+int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
+int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
+int gk20a_clk_prog(struct nvkm_clk *);
+void gk20a_clk_tidy(struct nvkm_clk *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
new file mode 100644
index 0000000..71b2bbb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <subdev/clk.h>
+#include <core/device.h>
+
+#include "priv.h"
+#include "gk20a.h"
+
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
+
+#define MASK(w)	((1 << w) - 1)
+
+#define BYPASSCTRL_SYS	(SYS_GPCPLL_CFG_BASE + 0x340)
+#define BYPASSCTRL_SYS_GPCPLL_SHIFT	0
+#define BYPASSCTRL_SYS_GPCPLL_WIDTH	1
+
+static u32 pl_to_div(u32 pl)
+{
+	return pl;
+}
+
+static u32 div_to_pl(u32 div)
+{
+	return div;
+}
+
+static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
+	.min_vco = 1300000, .max_vco = 2600000,
+	.min_u = 12000, .max_u = 38400,
+	.min_m = 1, .max_m = 255,
+	.min_n = 8, .max_n = 255,
+	.min_pl = 1, .max_pl = 31,
+};
+
+static struct nvkm_pstate
+gm20b_pstates[] = {
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 76800,
+			.voltage = 0,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 153600,
+			.voltage = 1,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 230400,
+			.voltage = 2,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 307200,
+			.voltage = 3,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 384000,
+			.voltage = 4,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 460800,
+			.voltage = 5,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 537600,
+			.voltage = 6,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 614400,
+			.voltage = 7,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 691200,
+			.voltage = 8,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 768000,
+			.voltage = 9,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 844800,
+			.voltage = 10,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 921600,
+			.voltage = 11,
+		},
+	},
+	{
+		.base = {
+			.domain[nv_clk_src_gpc] = 998400,
+			.voltage = 12,
+		},
+	},
+};
+
+static int
+gm20b_clk_init(struct nvkm_clk *base)
+{
+	struct gk20a_clk *clk = gk20a_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	int ret;
+
+	/* Set the global bypass control to VCO */
+	nvkm_mask(device, BYPASSCTRL_SYS,
+	       MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
+	       0);
+
+	/* Start with lowest frequency */
+	base->func->calc(base, &base->func->pstates[0].base);
+	ret = base->func->prog(&clk->base);
+	if (ret) {
+		nvkm_error(subdev, "cannot initialize clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct nvkm_clk_func
+gm20b_clk_speedo0 = {
+	.init = gm20b_clk_init,
+	.fini = gk20a_clk_fini,
+	.read = gk20a_clk_read,
+	.calc = gk20a_clk_calc,
+	.prog = gk20a_clk_prog,
+	.tidy = gk20a_clk_tidy,
+	.pstates = gm20b_pstates,
+	.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+		{ nv_clk_src_max },
+	},
+};
+
+int
+gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	struct gk20a_clk *clk;
+	int ret;
+
+	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+	if (!clk)
+		return -ENOMEM;
+	*pclk = &clk->base;
+
+	ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
+			      &gm20b_pllg_params, clk);
+
+	clk->pl_to_div = pl_to_div;
+	clk->div_to_pl = div_to_pl;
+
+	return ret;
+}
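
A note on how the table above is consumed: the gk20a code that gm20b reuses picks M, N and PL so the VCO stays inside [min_vco, max_vco] and the post-divided output hits the requested gpc clock; on gm20b the PL field encodes the divider directly, hence the identity pl_to_div()/div_to_pl() above. A minimal sketch of the arithmetic under those parameters (the function name and the exact formula are illustrative assumptions, not the driver's API):

/* Sketch: GPCPLL output rate in kHz, assuming out = (ref * N) / (M * div)
 * with div == pl_to_div(pl) == pl on gm20b. */
static u32 gpcpll_rate_khz(u32 ref_khz, u32 m, u32 n, u32 pl)
{
	u32 vco_khz = ref_khz * n / m;	/* must stay within min_vco..max_vco */

	return vco_khz / pl;
}

/* e.g. with a 38400 kHz reference (consistent with max_u above),
 * M=1 N=40 PL=2: VCO = 1536000 kHz (inside 1300000..2600000) and
 * output = 768000, matching the 768000 entry of gm20b_pstates[]. */
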
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index 793e73d..eac88e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -11,4 +11,4 @@
 nvkm-y += nvkm/subdev/devinit/mcp89.o
 nvkm-y += nvkm/subdev/devinit/gf100.o
 nvkm-y += nvkm/subdev/devinit/gm107.o
-nvkm-y += nvkm/subdev/devinit/gm204.o
+nvkm-y += nvkm/subdev/devinit/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 22b0140..2923598 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -90,9 +90,21 @@
 	return disable;
 }
 
+void
+gf100_devinit_preinit(struct nvkm_devinit *base)
+{
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* This bit is set by devinit, and flips back to 0 on suspend */
+	if (!base->post)
+		base->post = ((nvkm_rd32(device, 0x2240c) & BIT(1)) == 0);
+}
+
 static const struct nvkm_devinit_func
 gf100_devinit = {
-	.preinit = nv50_devinit_preinit,
+	.preinit = gf100_devinit_preinit,
 	.init = nv50_devinit_init,
 	.post = nv04_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
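
The new gf100_devinit_preinit() swaps the nv50 VGA heuristics for a direct probe: bit 1 of register 0x2240c is set once the devinit scripts have run and reads back as 0 after suspend, so post is forced whenever the bit is clear. Restated as a tiny predicate (a sketch of the same read, not new driver API):

/* Sketch: true when devinit scripts still need to run, per the
 * 0x2240c bit 1 convention used in the hunk above. */
static bool gf100_needs_post(struct nvkm_device *device)
{
	return (nvkm_rd32(device, 0x2240c) & BIT(1)) == 0;
}
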
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 2be98bd..28ca01b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -46,7 +46,7 @@
 
 static const struct nvkm_devinit_func
 gm107_devinit = {
-	.preinit = nv50_devinit_preinit,
+	.preinit = gf100_devinit_preinit,
 	.init = nv50_devinit_init,
 	.post = nv04_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
similarity index 94%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 2b9c3f1..a410c0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -107,7 +107,7 @@
 }
 
 static int
-gm204_devinit_post(struct nvkm_devinit *base, bool post)
+gm200_devinit_post(struct nvkm_devinit *base, bool post)
 {
 	struct nv50_devinit *init = nv50_devinit(base);
 	struct nvkm_subdev *subdev = &init->base.subdev;
@@ -165,17 +165,17 @@
 }
 
 static const struct nvkm_devinit_func
-gm204_devinit = {
-	.preinit = nv50_devinit_preinit,
+gm200_devinit = {
+	.preinit = gf100_devinit_preinit,
 	.init = nv50_devinit_init,
-	.post = gm204_devinit_post,
+	.post = gm200_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gm107_devinit_disable,
 };
 
 int
-gm204_devinit_new(struct nvkm_device *device, int index,
+gm200_devinit_new(struct nvkm_device *device, int index,
 		struct nvkm_devinit **pinit)
 {
-	return nv50_devinit_new_(&gm204_devinit, device, index, pinit);
+	return nv50_devinit_new_(&gm200_devinit, device, index, pinit);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 337c2c6..c714b09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -93,28 +93,27 @@
 void
 nv50_devinit_preinit(struct nvkm_devinit *base)
 {
-	struct nv50_devinit *init = nv50_devinit(base);
-	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_subdev *subdev = &base->subdev;
 	struct nvkm_device *device = subdev->device;
 
 	/* our heuristics can't detect whether the board has had its
 	 * devinit scripts executed or not if the display engine is
 	 * missing, assume it's a secondary gpu which requires post
 	 */
-	if (!init->base.post) {
-		u64 disable = nvkm_devinit_disable(&init->base);
+	if (!base->post) {
+		u64 disable = nvkm_devinit_disable(base);
 		if (disable & (1ULL << NVKM_ENGINE_DISP))
-			init->base.post = true;
+			base->post = true;
 	}
 
 	/* magic to detect whether or not x86 vbios code has executed
 	 * the devinit scripts to initialise the board
 	 */
-	if (!init->base.post) {
+	if (!base->post) {
 		if (!nvkm_rdvgac(device, 0, 0x00) &&
 		    !nvkm_rdvgac(device, 0, 0x1a)) {
 			nvkm_debug(subdev, "adaptor not initialised\n");
-			init->base.post = true;
+			base->post = true;
 		}
 	}
 }
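
This hunk is a pure simplification: nv50_devinit_preinit() only ever touched members of the embedded nvkm_devinit, so the container_of() downcast (and the init->base indirection it forced) was noise. For reference, the shape the cast relies on, sketched with illustrative names:

/* Sketch: the derived object embeds the base object; nv50_devinit(p)
 * is container_of(p, struct nv50_devinit, base), which this function
 * no longer needs since it reads only base fields. */
struct nv50_devinit_sketch {
	struct nvkm_devinit base;	/* all preinit actually uses */
	/* chip-specific fields follow */
};
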
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 5de70a8..25d2ae3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -20,6 +20,7 @@
 			struct nvkm_oclass *, void *, u32,
 			struct nvkm_object **);
 int  gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
+void gf100_devinit_preinit(struct nvkm_devinit *);
 
 u64  gm107_devinit_disable(struct nvkm_devinit *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 1f730613..48f01e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -6,7 +6,7 @@
 nvkm-y += nvkm/subdev/i2c/gf117.o
 nvkm-y += nvkm/subdev/i2c/gf119.o
 nvkm-y += nvkm/subdev/i2c/gk104.o
-nvkm-y += nvkm/subdev/i2c/gm204.o
+nvkm-y += nvkm/subdev/i2c/gm200.o
 
 nvkm-y += nvkm/subdev/i2c/pad.o
 nvkm-y += nvkm/subdev/i2c/padnv04.o
@@ -14,7 +14,7 @@
 nvkm-y += nvkm/subdev/i2c/padnv50.o
 nvkm-y += nvkm/subdev/i2c/padg94.o
 nvkm-y += nvkm/subdev/i2c/padgf119.o
-nvkm-y += nvkm/subdev/i2c/padgm204.o
+nvkm-y += nvkm/subdev/i2c/padgm200.o
 
 nvkm-y += nvkm/subdev/i2c/bus.o
 nvkm-y += nvkm/subdev/i2c/busnv04.o
@@ -25,6 +25,6 @@
 
 nvkm-y += nvkm/subdev/i2c/aux.o
 nvkm-y += nvkm/subdev/i2c/auxg94.o
-nvkm-y += nvkm/subdev/i2c/auxgm204.o
+nvkm-y += nvkm/subdev/i2c/auxgm200.o
 
 nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 35a892e..fc6b162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -18,7 +18,7 @@
 		      u32 addr, u8 *data, u8 size);
 
 int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
-int gm204_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
 
 #define AUX_MSG(b,l,f,a...) do {                                               \
 	struct nvkm_i2c_aux *_aux = (b);                                       \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
similarity index 88%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index bed231b..61d729b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -21,23 +21,23 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#define gm204_i2c_aux(p) container_of((p), struct gm204_i2c_aux, base)
+#define gm200_i2c_aux(p) container_of((p), struct gm200_i2c_aux, base)
 #include "aux.h"
 
-struct gm204_i2c_aux {
+struct gm200_i2c_aux {
 	struct nvkm_i2c_aux base;
 	int ch;
 };
 
 static void
-gm204_i2c_aux_fini(struct gm204_i2c_aux *aux)
+gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
 {
 	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
 	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
 }
 
 static int
-gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
+gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
 {
 	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
 	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
@@ -64,7 +64,7 @@
 		udelay(1);
 		if (!timeout--) {
 			AUX_ERR(&aux->base, "magic wait %08x", ctrl);
-			gm204_i2c_aux_fini(aux);
+			gm200_i2c_aux_fini(aux);
 			return -EBUSY;
 		}
 	} while ((ctrl & 0x03000000) != urep);
@@ -73,10 +73,10 @@
 }
 
 static int
-gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 		   u8 type, u32 addr, u8 *data, u8 size)
 {
-	struct gm204_i2c_aux *aux = gm204_i2c_aux(obj);
+	struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
 	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
 	const u32 base = aux->ch * 0x50;
 	u32 ctrl, stat, timeout, retries;
@@ -85,7 +85,7 @@
 
 	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
 
-	ret = gm204_i2c_aux_init(aux);
+	ret = gm200_i2c_aux_init(aux);
 	if (ret < 0)
 		goto out;
 
@@ -155,26 +155,26 @@
 	}
 
 out:
-	gm204_i2c_aux_fini(aux);
+	gm200_i2c_aux_fini(aux);
 	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
 }
 
 static const struct nvkm_i2c_aux_func
-gm204_i2c_aux_func = {
-	.xfer = gm204_i2c_aux_xfer,
+gm200_i2c_aux_func = {
+	.xfer = gm200_i2c_aux_xfer,
 };
 
 int
-gm204_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+gm200_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
 		struct nvkm_i2c_aux **paux)
 {
-	struct gm204_i2c_aux *aux;
+	struct gm200_i2c_aux *aux;
 
 	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
 		return -ENOMEM;
 	*paux = &aux->base;
 
-	nvkm_i2c_aux_ctor(&gm204_i2c_aux_func, pad, index, &aux->base);
+	nvkm_i2c_aux_ctor(&gm200_i2c_aux_func, pad, index, &aux->base);
 	aux->ch = drive;
 	aux->base.intr = 1 << aux->ch;
 	return 0;
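
Apart from the rename this file is unchanged, but the structure is worth noting: every transfer brackets the hardware access with _init()/_fini(), and _init() spins on a handshake bit in a bounded 1 us delay loop, bailing with -EBUSY if the bit never settles. The idiom, reduced to a sketch (helper name and budget are illustrative):

/* Sketch: bounded busy-wait on a register condition, as in
 * gm200_i2c_aux_init() above. */
static int aux_poll(struct nvkm_device *device, u32 addr,
		    u32 mask, u32 want, u32 budget_us)
{
	while ((nvkm_rd32(device, addr) & mask) != want) {
		if (!budget_us--)
			return -EBUSY;	/* caller must run its fini path */
		udelay(1);
	}
	return 0;
}
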
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
similarity index 88%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index ff9f7d6..a23c5f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -25,16 +25,16 @@
 #include "pad.h"
 
 static const struct nvkm_i2c_func
-gm204_i2c = {
+gm200_i2c = {
 	.pad_x_new = gf119_i2c_pad_x_new,
-	.pad_s_new = gm204_i2c_pad_s_new,
+	.pad_s_new = gm200_i2c_pad_s_new,
 	.aux = 8,
 	.aux_stat = gk104_aux_stat,
 	.aux_mask = gk104_aux_mask,
 };
 
 int
-gm204_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gm200_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	return nvkm_i2c_new_(&gm204_i2c, device, index, pi2c);
+	return nvkm_i2c_new_(&gm200_i2c, device, index, pi2c);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 9eeb992..316c453 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -49,11 +49,11 @@
 int nv50_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
 int g94_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
 int gf119_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
-int gm204_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm200_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
 
 int g94_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
 int gf119_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
-int gm204_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm200_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
 
 int anx9805_pad_new(struct nvkm_i2c_bus *, int, u8, struct nvkm_i2c_pad **);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c
similarity index 81%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c
index 24a4d76..7d417f6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c
@@ -26,7 +26,7 @@
 #include "bus.h"
 
 static void
-gm204_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
+gm200_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
 	struct nvkm_subdev *subdev = &pad->i2c->subdev;
 	struct nvkm_device *device = subdev->device;
@@ -51,26 +51,26 @@
 }
 
 static const struct nvkm_i2c_pad_func
-gm204_i2c_pad_s_func = {
+gm200_i2c_pad_s_func = {
 	.bus_new_4 = gf119_i2c_bus_new,
-	.aux_new_6 = gm204_i2c_aux_new,
-	.mode = gm204_i2c_pad_mode,
+	.aux_new_6 = gm200_i2c_aux_new,
+	.mode = gm200_i2c_pad_mode,
 };
 
 int
-gm204_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+gm200_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
 {
-	return nvkm_i2c_pad_new_(&gm204_i2c_pad_s_func, i2c, id, ppad);
+	return nvkm_i2c_pad_new_(&gm200_i2c_pad_s_func, i2c, id, ppad);
 }
 
 static const struct nvkm_i2c_pad_func
-gm204_i2c_pad_x_func = {
+gm200_i2c_pad_x_func = {
 	.bus_new_4 = gf119_i2c_bus_new,
-	.aux_new_6 = gm204_i2c_aux_new,
+	.aux_new_6 = gm200_i2c_aux_new,
 };
 
 int
-gm204_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+gm200_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
 {
-	return nvkm_i2c_pad_new_(&gm204_i2c_pad_x_func, i2c, id, ppad);
+	return nvkm_i2c_pad_new_(&gm200_i2c_pad_x_func, i2c, id, ppad);
 }
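
Same mechanical rename here, but the two tables show why pads come in two flavours: the 's' (shared) pads multiplex between I2C and AUX signalling and therefore carry a .mode hook, while the 'x' pads omit it. Callers consequently have to treat the hook as optional, along the lines of this sketch:

/* Sketch: dispatch through an optional vtable hook, mirroring how
 * the .mode callback differs between the _s_ and _x_ tables above. */
static void pad_set_mode(struct nvkm_i2c_pad *pad,
			 enum nvkm_i2c_pad_mode mode)
{
	if (pad->func->mode)	/* absent on 'x' pads */
		pad->func->mode(pad, mode);
}
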
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
index 7e77a74..ad572d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
@@ -2,4 +2,4 @@
 nvkm-y += nvkm/subdev/ibus/gf117.o
 nvkm-y += nvkm/subdev/ibus/gk104.o
 nvkm-y += nvkm/subdev/ibus/gk20a.o
-nvkm-y += nvkm/subdev/ibus/gm204.o
+nvkm-y += nvkm/subdev/ibus/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
similarity index 91%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
index b3839dc2..ef0b7f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
@@ -24,17 +24,17 @@
 #include "priv.h"
 
 static const struct nvkm_subdev_func
-gm204_ibus = {
+gm200_ibus = {
 	.intr = gk104_ibus_intr,
 };
 
 int
-gm204_ibus_new(struct nvkm_device *device, int index,
+gm200_ibus_new(struct nvkm_device *device, int index,
 	       struct nvkm_subdev **pibus)
 {
 	struct nvkm_subdev *ibus;
 	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_subdev_ctor(&gm204_ibus, device, index, 0, ibus);
+	nvkm_subdev_ctor(&gm200_ibus, device, index, 0, ibus);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild
new file mode 100644
index 0000000..98a4bd3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild
@@ -0,0 +1,2 @@
+nvkm-y += nvkm/subdev/iccsense/base.o
+nvkm-y += nvkm/subdev/iccsense/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
new file mode 100644
index 0000000..c44a852
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+#include "priv.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/extdev.h>
+#include <subdev/bios/iccsense.h>
+#include <subdev/i2c.h>
+
+static bool
+nvkm_iccsense_validate_device(struct i2c_adapter *i2c, u8 addr,
+			      enum nvbios_extdev_type type, u8 rail)
+{
+	switch (type) {
+	case NVBIOS_EXTDEV_INA209:
+	case NVBIOS_EXTDEV_INA219:
+		return rail == 0 && nv_rd16i2cr(i2c, addr, 0x0) >= 0;
+	case NVBIOS_EXTDEV_INA3221:
+		return rail <= 3 &&
+		       nv_rd16i2cr(i2c, addr, 0xff) == 0x3220 &&
+		       nv_rd16i2cr(i2c, addr, 0xfe) == 0x5449;
+	default:
+		return false;
+	}
+}
+
+static int
+nvkm_iccsense_poll_lane(struct i2c_adapter *i2c, u8 addr, u8 shunt_reg,
+			u8 shunt_shift, u8 bus_reg, u8 bus_shift, u8 shunt,
+			u16 lsb)
+{
+	int vshunt = nv_rd16i2cr(i2c, addr, shunt_reg);
+	int vbus = nv_rd16i2cr(i2c, addr, bus_reg);
+
+	if (vshunt < 0 || vbus < 0)
+		return -EINVAL;
+
+	vshunt >>= shunt_shift;
+	vbus >>= bus_shift;
+
+	return vbus * vshunt * lsb / shunt;
+}
+
+static int
+nvkm_iccsense_ina2x9_read(struct nvkm_iccsense *iccsense,
+                          struct nvkm_iccsense_rail *rail,
+			  u8 shunt_reg, u8 bus_reg)
+{
+	return nvkm_iccsense_poll_lane(rail->i2c, rail->addr, shunt_reg, 0,
+				       bus_reg, 3, rail->mohm, 10 * 4);
+}
+
+static int
+nvkm_iccsense_ina209_read(struct nvkm_iccsense *iccsense,
+			  struct nvkm_iccsense_rail *rail)
+{
+	return nvkm_iccsense_ina2x9_read(iccsense, rail, 3, 4);
+}
+
+static int
+nvkm_iccsense_ina219_read(struct nvkm_iccsense *iccsense,
+			  struct nvkm_iccsense_rail *rail)
+{
+	return nvkm_iccsense_ina2x9_read(iccsense, rail, 1, 2);
+}
+
+static int
+nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
+			   struct nvkm_iccsense_rail *rail)
+{
+	return nvkm_iccsense_poll_lane(rail->i2c, rail->addr,
+				       1 + (rail->rail * 2), 3,
+				       2 + (rail->rail * 2), 3, rail->mohm,
+				       40 * 8);
+}
+
+int
+nvkm_iccsense_read(struct nvkm_iccsense *iccsense, u8 idx)
+{
+	struct nvkm_iccsense_rail *rail;
+
+	if (!iccsense || idx >= iccsense->rail_count)
+		return -EINVAL;
+
+	rail = &iccsense->rails[idx];
+	if (!rail->read)
+		return -ENODEV;
+
+	return rail->read(iccsense, rail);
+}
+
+int
+nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense)
+{
+	int result = 0, i;
+	for (i = 0; i < iccsense->rail_count; ++i) {
+		int res = nvkm_iccsense_read(iccsense, i);
+		if (res >= 0)
+			result += res;
+		else
+			return res;
+	}
+	return result;
+}
+
+static void *
+nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
+
+	kfree(iccsense->rails);
+
+	return iccsense;
+}
+
+static int
+nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
+	struct nvkm_i2c *i2c = subdev->device->i2c;
+	struct nvbios_iccsense stbl;
+	int i;
+
+	if (!i2c || !bios || nvbios_iccsense_parse(bios, &stbl) ||
+	    !stbl.nr_entry)
+		return 0;
+
+	iccsense->rails = kmalloc(sizeof(*iccsense->rails) * stbl.nr_entry,
+	                          GFP_KERNEL);
+	if (!iccsense->rails)
+		return -ENOMEM;
+
+	iccsense->data_valid = true;
+	for (i = 0; i < stbl.nr_entry; ++i) {
+		struct pwr_rail_t *r = &stbl.rail[i];
+		struct nvbios_extdev_func extdev;
+		struct nvkm_iccsense_rail *rail;
+		struct nvkm_i2c_bus *i2c_bus;
+		u8 addr;
+
+		if (!r->mode || r->resistor_mohm == 0)
+			continue;
+
+		if (nvbios_extdev_parse(bios, r->extdev_id, &extdev))
+			continue;
+
+		if (extdev.type == 0xff)
+			continue;
+
+		if (extdev.bus)
+			i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_SEC);
+		else
+			i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+		if (!i2c_bus)
+			continue;
+
+		addr = extdev.addr >> 1;
+		if (!nvkm_iccsense_validate_device(&i2c_bus->i2c, addr,
+						   extdev.type, r->rail)) {
+			iccsense->data_valid = false;
+			nvkm_warn(subdev, "found unknown or invalid rail entry"
+				  " type 0x%x rail %i, power reading might be"
+				  " invalid\n", extdev.type, r->rail);
+			continue;
+		}
+
+		rail = &iccsense->rails[iccsense->rail_count];
+		switch (extdev.type) {
+		case NVBIOS_EXTDEV_INA209:
+			rail->read = nvkm_iccsense_ina209_read;
+			break;
+		case NVBIOS_EXTDEV_INA219:
+			rail->read = nvkm_iccsense_ina219_read;
+			break;
+		case NVBIOS_EXTDEV_INA3221:
+			rail->read = nvkm_iccsense_ina3221_read;
+			break;
+		}
+
+		rail->addr = addr;
+		rail->rail = r->rail;
+		rail->mohm = r->resistor_mohm;
+		rail->i2c = &i2c_bus->i2c;
+		++iccsense->rail_count;
+	}
+	return 0;
+}
+
+static const struct nvkm_subdev_func iccsense_func = {
+	.oneinit = nvkm_iccsense_oneinit,
+	.dtor = nvkm_iccsense_dtor,
+};
+
+void
+nvkm_iccsense_ctor(struct nvkm_device *device, int index,
+		   struct nvkm_iccsense *iccsense)
+{
+	nvkm_subdev_ctor(&iccsense_func, device, index, 0, &iccsense->subdev);
+}
+
+int
+nvkm_iccsense_new_(struct nvkm_device *device, int index,
+		   struct nvkm_iccsense **iccsense)
+{
+	if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_iccsense_ctor(device, index, *iccsense);
+	return 0;
+}
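
The interesting arithmetic is in nvkm_iccsense_poll_lane(): the INA sensors expose shunt and bus voltage in separate registers, and with the per-sensor current LSB folded into lsb, power comes out as vbus * vshunt * lsb / shunt_mohm; per-rail results are then summed by nvkm_iccsense_read_all(). A worked sketch for the INA219 path above (the raw register values are hypothetical; the 10 * 4 scaling and the >>3 bus shift are taken from the code):

/* Sketch: redo the INA219 lane computation with made-up readings. */
static int ina219_power_example(int vshunt_raw, int vbus_raw, int mohm)
{
	int vbus = vbus_raw >> 3;	/* bus_shift == 3 in the ina2x9 path */

	return vbus * vshunt_raw * (10 * 4) / mohm;	/* lsb == 10 * 4 */
}

/* e.g. ina219_power_example(0x0640, 0x1f40, 5):
 * vbus = 0x1f40 >> 3 = 1000, vshunt = 1600,
 * 1000 * 1600 * 40 / 5 = 12800000, in whatever units fall out of
 * the BIOS-provided resistor value and sensor LSB. */
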
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
similarity index 78%
copy from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
copy to drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
index 6511d6e..cccff1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2015 Karol Herbst
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,13 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ben Skeggs
+ * Authors: Karol Herbst
  */
-#include "changk104.h"
+#include "priv.h"
 
-#include <nvif/class.h>
-
-const struct nvkm_fifo_chan_oclass
-gm204_fifo_gpfifo_oclass = {
-	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
-	.base.minver = 0,
-	.base.maxver = 0,
-	.ctor = gk104_fifo_gpfifo_new,
-};
+int
+gf100_iccsense_new(struct nvkm_device *device, int index,
+		   struct nvkm_iccsense **piccsense)
+{
+	return nvkm_iccsense_new_(device, index, piccsense);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
new file mode 100644
index 0000000..ed398b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_ICCSENSE_PRIV_H__
+#define __NVKM_ICCSENSE_PRIV_H__
+#define nvkm_iccsense(p) container_of((p), struct nvkm_iccsense, subdev)
+#include <subdev/iccsense.h>
+
+struct nvkm_iccsense_rail {
+	int (*read)(struct nvkm_iccsense *, struct nvkm_iccsense_rail *);
+	struct i2c_adapter *i2c;
+	u8 addr;
+	u8 rail;
+	u8 mohm;
+};
+
+void nvkm_iccsense_ctor(struct nvkm_device *, int, struct nvkm_iccsense *);
+int nvkm_iccsense_new_(struct nvkm_device *, int, struct nvkm_iccsense **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 4c20fec..6b8f2a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -228,6 +228,8 @@
 	struct gk20a_instmem *imem = node->imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
 
+	/* in case we got a write-combined mapping */
+	wmb();
 	nvkm_ltc_invalidate(ltc);
 }
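
The wmb() is the whole fix: gk20a instmem objects can be mapped write-combined, so CPU stores may still be sitting in the WC buffer when the L2 invalidate is issued, and the GPU could then read stale data. The required ordering, sketched (buffer and value are illustrative):

/* Sketch: make WC-buffered CPU writes visible before the GPU-side
 * cache invalidate, as the hunk above does. */
static void publish(u32 *wc_map, u32 value, struct nvkm_ltc *ltc)
{
	*wc_map = value;	/* may linger in the write-combine buffer */
	wmb();			/* drain it */
	nvkm_ltc_invalidate(ltc);
}
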
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index f8108df..932b366 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -2,4 +2,4 @@
 nvkm-y += nvkm/subdev/ltc/gf100.o
 nvkm-y += nvkm/subdev/ltc/gk104.o
 nvkm-y += nvkm/subdev/ltc/gm107.o
-nvkm-y += nvkm/subdev/ltc/gm204.o
+nvkm-y += nvkm/subdev/ltc/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index fb0de83..c9eb677 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -129,9 +129,7 @@
 	s64 taken;
 
 	nvkm_wr32(device, 0x70004, 0x00000001);
-	taken = nvkm_wait_msec(device, 2, 0x70004, 0x00000003, 0x00000000);
-	if (taken < 0)
-		nvkm_warn(&ltc->subdev, "LTC invalidate timeout\n");
+	taken = nvkm_wait_msec(device, 2000, 0x70004, 0x00000003, 0x00000000);
 
 	if (taken > 0)
 		nvkm_debug(&ltc->subdev, "LTC invalidate took %lld ns\n", taken);
@@ -144,9 +142,7 @@
 	s64 taken;
 
 	nvkm_wr32(device, 0x70010, 0x00000001);
-	taken = nvkm_wait_msec(device, 2, 0x70010, 0x00000003, 0x00000000);
-	if (taken < 0)
-		nvkm_warn(&ltc->subdev, "LTC flush timeout\n");
+	taken = nvkm_wait_msec(device, 2000, 0x70010, 0x00000003, 0x00000000);
 
 	if (taken > 0)
 		nvkm_debug(&ltc->subdev, "LTC flush took %lld ns\n", taken);
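
Two changes per function here: the wait budget grows from 2 ms to 2000 ms (presumably 2 ms proved too tight for a full L2 flush/invalidate on large parts), and the explicit timeout warning goes away since a negative taken already encodes the failure. The helper's contract as assumed from these call sites, with the warning kept for illustration:

/* Assumed semantics of nvkm_wait_msec(), inferred from its use above:
 * poll until (nvkm_rd32(device, addr) & mask) == data for at most
 * 'msec' ms; returns elapsed time in ns, or a negative value on timeout. */
static void ltc_invalidate_example(struct nvkm_ltc *ltc,
				   struct nvkm_device *device)
{
	s64 taken;

	nvkm_wr32(device, 0x70004, 0x00000001);	/* kick the invalidate */
	taken = nvkm_wait_msec(device, 2000, 0x70004,
			       0x00000003, 0x00000000);
	if (taken < 0)
		nvkm_warn(&ltc->subdev, "invalidate timed out\n");
}
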
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 2af1f9e..e292f56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -43,10 +43,8 @@
 	for (c = 0; c < ltc->ltc_nr; c++) {
 		for (s = 0; s < ltc->lts_nr; s++) {
 			const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200);
-			nvkm_msec(device, 2000,
-				if (!nvkm_rd32(device, addr))
-					break;
-			);
+			nvkm_wait_msec(device, 2000, addr,
+				       0x00000004, 0x00000000);
 		}
 	}
 }
@@ -75,7 +73,7 @@
 {
 	struct nvkm_subdev *subdev = &ltc->subdev;
 	struct nvkm_device *device = subdev->device;
-	u32 base = 0x140000 + (c * 0x2000) + (s * 0x400);
+	u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
 	u32 stat = nvkm_rd32(device, base + 0x00c);
 
 	if (stat) {
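
The second hunk is a real bug fix: LTC blocks stride 0x2000 apart and slices stride 0x200 within a block (the cbc_wait loop above uses exactly that), so the old 0x400 slice stride computed interrupt state for the wrong slice. The addressing as plain arithmetic:

/* Per-slice register address on gm107-style LTC, matching both hunks:
 * LTC stride 0x2000, LTS stride 0x200. */
static u32 lts_addr(u32 base, u32 ltc_idx, u32 lts_idx)
{
	return base + (ltc_idx * 0x2000) + (lts_idx * 0x200);
}

/* e.g. lts_addr(0x140000, 1, 2) == 0x142400 */
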
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
similarity index 87%
rename from drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c
rename to drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index 5ad6fb9..2a29bfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -27,7 +27,7 @@
 #include <subdev/timer.h>
 
 static int
-gm204_ltc_oneinit(struct nvkm_ltc *ltc)
+gm200_ltc_oneinit(struct nvkm_ltc *ltc)
 {
 	struct nvkm_device *device = ltc->subdev.device;
 
@@ -37,15 +37,15 @@
 	return gf100_ltc_oneinit_tag_ram(ltc);
 }
 static void
-gm204_ltc_init(struct nvkm_ltc *ltc)
+gm200_ltc_init(struct nvkm_ltc *ltc)
 {
 	nvkm_wr32(ltc->subdev.device, 0x17e278, ltc->tag_base);
 }
 
 static const struct nvkm_ltc_func
-gm204_ltc = {
-	.oneinit = gm204_ltc_oneinit,
-	.init = gm204_ltc_init,
+gm200_ltc = {
+	.oneinit = gm200_ltc_oneinit,
+	.init = gm200_ltc_init,
 	.intr = gm107_ltc_intr, /*XXX: not validated */
 	.cbc_clear = gm107_ltc_cbc_clear,
 	.cbc_wait = gm107_ltc_cbc_wait,
@@ -57,7 +57,7 @@
 };
 
 int
-gm204_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gm200_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
 {
-	return nvkm_ltc_new_(&gm204_ltc, device, index, pltc);
+	return nvkm_ltc_new_(&gm200_ltc, device, index, pltc);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 7702944..e2faccf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -24,8 +24,8 @@
 	0x00000000,
 /* 0x0058: proc_list_head */
 	0x54534f48,
-	0x00000507,
-	0x000004a4,
+	0x0000050a,
+	0x000004a7,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x00000753,
-	0x00000745,
+	0x00000756,
+	0x00000748,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x00000757,
-	0x00000755,
+	0x0000075a,
+	0x00000758,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x00000b87,
-	0x00000a2a,
+	0x00000b8a,
+	0x00000a2d,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000bb0,
-	0x00000b89,
+	0x00000bb3,
+	0x00000b8c,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000bbc,
-	0x00000bba,
+	0x00000bbf,
+	0x00000bbd,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -229,26 +229,26 @@
 /* 0x0370: memx_func_head */
 	0x00000001,
 	0x00000000,
-	0x00000546,
+	0x00000549,
 /* 0x037c: memx_func_next */
 	0x00000002,
 	0x00000000,
-	0x000005d0,
+	0x000005d3,
 	0x00000003,
 	0x00000002,
-	0x0000069a,
+	0x0000069d,
 	0x00040004,
 	0x00000000,
-	0x000006b6,
+	0x000006b9,
 	0x00010005,
 	0x00000000,
-	0x000006d3,
+	0x000006d6,
 	0x00010006,
 	0x00000000,
-	0x00000658,
+	0x0000065b,
 	0x00000007,
 	0x00000000,
-	0x000006de,
+	0x000006e1,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -917,887 +917,887 @@
 };
 
 uint32_t gf100_pmu_code[] = {
-	0x03930ef5,
+	0x03920ef5,
 /* 0x0004: rd32 */
 	0x07a007f1,
 	0xd00604b6,
 	0x04bd000e,
-	0xf001d7f0,
-	0x07f101d3,
-	0x04b607ac,
-	0x000dd006,
-/* 0x0022: rd32_wait */
-	0xd7f104bd,
-	0xd4b607ac,
-	0x00ddcf06,
-	0x7000d4f1,
-	0xf1f21bf4,
-	0xb607a4d7,
-	0xddcf06d4,
-/* 0x003f: wr32 */
-	0xf100f800,
-	0xb607a007,
-	0x0ed00604,
-	0xf104bd00,
-	0xb607a407,
+	0x0001d7f1,
+	0xf101d3f0,
+	0xb607ac07,
 	0x0dd00604,
-	0xf004bd00,
-	0xd5f002d7,
-	0x01d3f0f0,
-	0x07ac07f1,
+/* 0x0023: rd32_wait */
+	0xf104bd00,
+	0xb607acd7,
+	0xddcf06d4,
+	0x00d4f100,
+	0xf21bf470,
+	0x07a4d7f1,
+	0xcf06d4b6,
+	0x00f800dd,
+/* 0x0040: wr32 */
+	0x07a007f1,
+	0xd00604b6,
+	0x04bd000e,
+	0x07a407f1,
 	0xd00604b6,
 	0x04bd000d,
-/* 0x006c: wr32_wait */
-	0x07acd7f1,
-	0xcf06d4b6,
-	0xd4f100dd,
-	0x1bf47000,
-/* 0x007f: nsec */
-	0xf900f8f2,
-	0xf080f990,
-	0x84b62c87,
-	0x0088cf06,
-/* 0x008c: nsec_loop */
-	0xb62c97f0,
-	0x99cf0694,
-	0x0298bb00,
-	0xf4069eb8,
-	0x80fcf11e,
-	0x00f890fc,
-/* 0x00a4: wait */
-	0x80f990f9,
-	0xb62c87f0,
-	0x88cf0684,
-/* 0x00b1: wait_loop */
-	0x02eeb900,
-	0xb90421f4,
-	0xadfd02da,
-	0x06acb804,
-	0xf0150bf4,
+	0x00f2d7f1,
+	0xf101d3f0,
+	0xb607ac07,
+	0x0dd00604,
+/* 0x006b: wr32_wait */
+	0xf104bd00,
+	0xb607acd7,
+	0xddcf06d4,
+	0x00d4f100,
+	0xf21bf470,
+/* 0x007e: nsec */
+	0x90f900f8,
+	0x87f080f9,
+	0x0684b62c,
+/* 0x008b: nsec_loop */
+	0xf00088cf,
 	0x94b62c97,
 	0x0099cf06,
 	0xb80298bb,
-	0x1ef4069b,
-/* 0x00d5: wait_done */
-	0xfc80fcdf,
-/* 0x00db: intr_watchdog */
-	0x9800f890,
-	0x96b003e9,
-	0x2a0bf400,
-	0xbb9a0a98,
-	0x1cf4029a,
-	0x01d7f00f,
-	0x02d221f5,
-	0x0ef494bd,
-/* 0x00f9: intr_watchdog_next_time */
-	0x9b0a9815,
-	0xf400a6b0,
-	0x9ab8090b,
-	0x061cf406,
-/* 0x0108: intr_watchdog_next_time_set */
-/* 0x010b: intr_watchdog_next_proc */
-	0x809b0980,
-	0xe0b603e9,
-	0x68e6b158,
-	0xc61bf402,
-/* 0x011a: intr */
-	0x00f900f8,
-	0x80f904bd,
-	0xa0f990f9,
-	0xc0f9b0f9,
-	0xe0f9d0f9,
-	0xf7f0f0f9,
-	0x0188fe00,
-	0x87f180f9,
-	0x84b605d0,
+	0x1ef4069e,
+	0xfc80fcf1,
+/* 0x00a3: wait */
+	0xf900f890,
+	0xf080f990,
+	0x84b62c87,
 	0x0088cf06,
-	0xf10180b6,
-	0xb605d007,
-	0x08d00604,
-	0xf004bd00,
-	0x84b60887,
-	0x0088cf06,
-	0xf40289c4,
-	0x0080230b,
-	0x58e7f09b,
-	0x98db21f4,
-	0x96b09b09,
-	0x110bf400,
-	0xb63407f0,
-	0x09d00604,
-	0x8004bd00,
-/* 0x017e: intr_skip_watchdog */
-	0x89e49a09,
-	0x0bf40800,
-	0x8897f148,
-	0x0694b606,
-	0xc40099cf,
-	0x0bf4029a,
-	0xc0c7f12c,
-	0x06c4b604,
-	0xf900cccf,
-	0x48e7f1c0,
-	0x53e3f14f,
-	0x00d7f054,
-	0x033721f5,
-	0x07f1c0fc,
-	0x04b604c0,
-	0x000cd006,
-/* 0x01be: intr_subintr_skip_fifo */
-	0x07f104bd,
-	0x04b60688,
-	0x0009d006,
-/* 0x01ca: intr_skip_subintr */
-	0x97f104bd,
-	0x90bd00e0,
-	0xf00489fd,
-	0x04b60407,
+/* 0x00b0: wait_loop */
+	0xf402eeb9,
+	0xdab90421,
+	0x04adfd02,
+	0xf406acb8,
+	0x97f0150b,
+	0x0694b62c,
+	0xbb0099cf,
+	0x9bb80298,
+	0xdf1ef406,
+/* 0x00d4: wait_done */
+	0x90fc80fc,
+/* 0x00da: intr_watchdog */
+	0xe99800f8,
+	0x0096b003,
+	0x982a0bf4,
+	0x9abb9a0a,
+	0x0f1cf402,
+	0xf501d7f0,
+	0xbd02d121,
+	0x150ef494,
+/* 0x00f8: intr_watchdog_next_time */
+	0xb09b0a98,
+	0x0bf400a6,
+	0x069ab809,
+/* 0x0107: intr_watchdog_next_time_set */
+	0x80061cf4,
+/* 0x010a: intr_watchdog_next_proc */
+	0xe9809b09,
+	0x58e0b603,
+	0x0268e6b1,
+	0xf8c61bf4,
+/* 0x0119: intr */
+	0xbd00f900,
+	0xf980f904,
+	0xf9a0f990,
+	0xf9c0f9b0,
+	0xf9e0f9d0,
+	0x00f7f0f0,
+	0xf90188fe,
+	0xd087f180,
+	0x0684b605,
+	0xb60088cf,
+	0x07f10180,
+	0x04b605d0,
 	0x0008d006,
-	0x80fc04bd,
-	0xfc0088fe,
-	0xfce0fcf0,
-	0xfcc0fcd0,
-	0xfca0fcb0,
-	0xfc80fc90,
-	0x0032f400,
-/* 0x01fa: ticks_from_ns */
-	0xc0f901f8,
+	0x87f004bd,
+	0x0684b608,
+	0xc40088cf,
+	0x0bf40289,
+	0x9b008023,
+	0xf458e7f0,
+	0x0998da21,
+	0x0096b09b,
+	0xf0110bf4,
+	0x04b63407,
+	0x0009d006,
+	0x098004bd,
+/* 0x017d: intr_skip_watchdog */
+	0x0089e49a,
+	0x480bf408,
+	0x068897f1,
+	0xcf0694b6,
+	0x9ac40099,
+	0x2c0bf402,
+	0x04c0c7f1,
+	0xcf06c4b6,
+	0xc0f900cc,
+	0x4f48e7f1,
+	0x5453e3f1,
+	0xf500d7f0,
+	0xfc033621,
+	0xc007f1c0,
+	0x0604b604,
+	0xbd000cd0,
+/* 0x01bd: intr_subintr_skip_fifo */
+	0x8807f104,
+	0x0604b606,
+	0xbd0009d0,
+/* 0x01c9: intr_skip_subintr */
+	0xe097f104,
+	0xfd90bd00,
+	0x07f00489,
+	0x0604b604,
+	0xbd0008d0,
+	0xfe80fc04,
+	0xf0fc0088,
+	0xd0fce0fc,
+	0xb0fcc0fc,
+	0x90fca0fc,
+	0x00fc80fc,
+	0xf80032f4,
+/* 0x01f9: ticks_from_ns */
+	0xf9c0f901,
+	0xcbd7f1b0,
+	0x00d3f000,
+	0x040b21f5,
+	0x03e8ccec,
+	0xf400b4b0,
+	0xeeec120b,
+	0xd7f103e8,
+	0xd3f000cb,
+	0x0b21f500,
+/* 0x0221: ticks_from_ns_quit */
+	0x02ceb904,
+	0xc0fcb0fc,
+/* 0x022a: ticks_from_us */
+	0xc0f900f8,
 	0xd7f1b0f9,
 	0xd3f000cb,
-	0x0821f500,
-	0xe8ccec04,
-	0x00b4b003,
-	0xec120bf4,
-	0xf103e8ee,
-	0xf000cbd7,
-	0x21f500d3,
-/* 0x0222: ticks_from_ns_quit */
-	0xceb90408,
-	0xfcb0fc02,
-/* 0x022b: ticks_from_us */
-	0xf900f8c0,
-	0xf1b0f9c0,
-	0xf000cbd7,
-	0x21f500d3,
-	0xceb90408,
-	0x00b4b002,
-	0xbd050bf4,
-/* 0x0245: ticks_from_us_quit */
-	0xfcb0fce4,
-/* 0x024b: ticks_to_us */
-	0xf100f8c0,
-	0xf000cbd7,
-	0xedff00d3,
-/* 0x0257: timer */
-	0xf900f8ec,
-	0xf480f990,
-	0xf8981032,
-	0x0086b003,
-	0xbd651cf4,
-	0x3807f084,
-	0xd00604b6,
-	0x04bd0008,
-	0xb63487f0,
-	0x88cf0684,
-	0x9a099800,
-	0xbb0298bb,
-	0xfe8000e9,
-	0x0887f003,
-	0xcf0684b6,
-	0x84f00088,
-	0x261bf402,
-	0xb63487f0,
-	0x88cf0684,
-	0x06e0b800,
-	0xb8090bf4,
-	0x1cf406e8,
-/* 0x02ad: timer_reset */
-	0x3407f011,
-	0xd00604b6,
-	0x04bd000e,
-/* 0x02bb: timer_enable */
-	0xf09a0e80,
-	0x07f00187,
-	0x0604b638,
-	0xbd0008d0,
-/* 0x02c9: timer_done */
-	0x1031f404,
-	0x90fc80fc,
-/* 0x02d2: send_proc */
-	0x80f900f8,
-	0xe89890f9,
-	0x04e99805,
-	0xb80486f0,
-	0x0bf40689,
-	0x0398c42a,
-	0xb6048894,
-	0x8ebb1880,
-	0x00fa9800,
-	0x80008a80,
-	0x8c80018d,
-	0x038b8002,
-	0xf00190b6,
-	0xe9800794,
-	0x0231f404,
-/* 0x030c: send_done */
-	0x80fc90fc,
-/* 0x0312: find */
-	0x80f900f8,
-	0xf45887f0,
-/* 0x031a: find_loop */
-	0x8a980131,
-	0x06aeb800,
-	0xb6100bf4,
-	0x86b15880,
-	0x1bf40268,
-	0x0132f4f0,
-/* 0x0330: find_done */
-	0xfc028eb9,
-/* 0x0337: send */
-	0xf500f880,
-	0xf4031221,
-	0x00f89701,
-/* 0x0340: recv */
-	0x80f990f9,
-	0x9805e898,
-	0x32f404e9,
-	0x0689b801,
-	0xc43d0bf4,
-	0x80b60389,
-	0x0784f001,
-	0x9805e880,
-	0xf0f902ea,
-	0xf9018ffe,
-	0x02efb9f0,
-	0xbb049994,
-	0xe0b600e9,
-	0x03eb9818,
-	0x9802ec98,
-	0xee9801ed,
-	0xfca5f900,
-	0x00f8fef0,
-	0xfc0131f4,
-/* 0x038d: recv_done */
-	0xfc80fcf0,
-/* 0x0393: init */
-	0xf100f890,
-	0xb6010817,
-	0x11cf0614,
-	0x0911e700,
-	0x0814b601,
-	0xf10014fe,
-	0xf000e017,
-	0x07f00013,
-	0x0604b61c,
-	0xbd0001d0,
-	0xff17f004,
-	0xb61407f0,
-	0x01d00604,
+	0x0b21f500,
+	0x02ceb904,
+	0xf400b4b0,
+	0xe4bd050b,
+/* 0x0244: ticks_from_us_quit */
+	0xc0fcb0fc,
+/* 0x024a: ticks_to_us */
+	0xd7f100f8,
+	0xd3f000cb,
+	0xecedff00,
+/* 0x0256: timer */
+	0x90f900f8,
+	0x32f480f9,
+	0x03f89810,
+	0xf40086b0,
+	0x84bd651c,
+	0xb63807f0,
+	0x08d00604,
 	0xf004bd00,
-	0x15f10217,
-	0x07f00800,
-	0x0604b610,
-	0xbd0001d0,
-	0x1a17f104,
-	0x0013f001,
-	0xf40010fe,
-	0x17f01031,
+	0x84b63487,
+	0x0088cf06,
+	0xbb9a0998,
+	0xe9bb0298,
+	0x03fe8000,
+	0xb60887f0,
+	0x88cf0684,
+	0x0284f000,
+	0xf0261bf4,
+	0x84b63487,
+	0x0088cf06,
+	0xf406e0b8,
+	0xe8b8090b,
+	0x111cf406,
+/* 0x02ac: timer_reset */
+	0xb63407f0,
+	0x0ed00604,
+	0x8004bd00,
+/* 0x02ba: timer_enable */
+	0x87f09a0e,
 	0x3807f001,
 	0xd00604b6,
-	0x04bd0001,
-/* 0x03f7: init_proc */
-	0x9858f7f0,
-	0x16b001f1,
-	0xfa0bf400,
-	0xf0b615f9,
-	0xf20ef458,
-/* 0x0408: mulu32_32_64 */
-	0x20f910f9,
-	0x40f930f9,
-	0x9510e195,
-	0xc4bd10d2,
-	0xedffb4bd,
-	0x301dffc0,
-	0xf10234b9,
-	0xb6ffff34,
-	0x45b61034,
-	0x00c3bb10,
-	0xff01b4bb,
-	0x34b930e2,
-	0xff34f102,
-	0x1034b6ff,
-	0xbb1045b6,
-	0xb4bb00c3,
-	0x3012ff01,
-	0xfc00b3bb,
-	0xfc30fc40,
-	0xf810fc20,
-/* 0x0459: host_send */
-	0xb017f100,
-	0x0614b604,
-	0xf10011cf,
-	0xb604a027,
-	0x22cf0624,
-	0x0612b800,
-	0xc4320bf4,
-	0xee94071e,
-	0x70e0b704,
-	0x03eb9802,
-	0x9802ec98,
-	0xee9801ed,
-	0x3721f500,
-	0x0110b603,
-	0xf10f1ec4,
-	0xb604b007,
-	0x0ed00604,
-	0xf404bd00,
-/* 0x04a2: host_send_done */
-	0x00f8ba0e,
-/* 0x04a4: host_recv */
-	0x4e4917f1,
-	0x525413f1,
-	0xf406e1b8,
-/* 0x04b2: host_recv_wait */
-	0x17f1aa0b,
-	0x14b604cc,
+	0x04bd0008,
+/* 0x02c8: timer_done */
+	0xfc1031f4,
+	0xf890fc80,
+/* 0x02d1: send_proc */
+	0xf980f900,
+	0x05e89890,
+	0xf004e998,
+	0x89b80486,
+	0x2a0bf406,
+	0x940398c4,
+	0x80b60488,
+	0x008ebb18,
+	0x8000fa98,
+	0x8d80008a,
+	0x028c8001,
+	0xb6038b80,
+	0x94f00190,
+	0x04e98007,
+/* 0x030b: send_done */
+	0xfc0231f4,
+	0xf880fc90,
+/* 0x0311: find */
+	0xf080f900,
+	0x31f45887,
+/* 0x0319: find_loop */
+	0x008a9801,
+	0xf406aeb8,
+	0x80b6100b,
+	0x6886b158,
+	0xf01bf402,
+/* 0x032f: find_done */
+	0xb90132f4,
+	0x80fc028e,
+/* 0x0336: send */
+	0x21f500f8,
+	0x01f40311,
+/* 0x033f: recv */
+	0xf900f897,
+	0x9880f990,
+	0xe99805e8,
+	0x0132f404,
+	0xf40689b8,
+	0x89c43d0b,
+	0x0180b603,
+	0x800784f0,
+	0xea9805e8,
+	0xfef0f902,
+	0xf0f9018f,
+	0x9402efb9,
+	0xe9bb0499,
+	0x18e0b600,
+	0x9803eb98,
+	0xed9802ec,
+	0x00ee9801,
+	0xf0fca5f9,
+	0xf400f8fe,
+	0xf0fc0131,
+/* 0x038c: recv_done */
+	0x90fc80fc,
+/* 0x0392: init */
+	0x17f100f8,
+	0x14b60108,
 	0x0011cf06,
-	0x04c827f1,
-	0xcf0624b6,
-	0x16f00022,
-	0x0612b808,
-	0xc4e60bf4,
-	0x34b60723,
-	0xf030b704,
-	0x033b8002,
-	0x80023c80,
-	0x3e80013d,
-	0x0120b600,
-	0xf10f24f0,
-	0xb604c807,
-	0x02d00604,
-	0xf004bd00,
-	0x07f04027,
-	0x0604b600,
-	0xbd0002d0,
-/* 0x0507: host_init */
-	0xf100f804,
-	0xb6008017,
-	0x15f11014,
-	0x07f10270,
-	0x04b604d0,
-	0x0001d006,
-	0x17f104bd,
-	0x14b60080,
-	0xf015f110,
-	0xdc07f102,
-	0x0604b604,
-	0xbd0001d0,
-	0x0117f004,
-	0x04c407f1,
+	0x010911e7,
+	0xfe0814b6,
+	0x17f10014,
+	0x13f000e0,
+	0x1c07f000,
 	0xd00604b6,
 	0x04bd0001,
-/* 0x0546: memx_func_enter */
-	0x67f100f8,
-	0x77f11620,
-	0x73f1f55d,
-	0x6eb9ffff,
-	0x0421f402,
-	0xfd02d8b9,
-	0x60f90487,
-	0xd0fc80f9,
-	0x21f4e0fc,
-	0xfe77f13f,
-	0xff73f1ff,
+	0xf0ff17f0,
+	0x04b61407,
+	0x0001d006,
+	0x17f004bd,
+	0x0015f102,
+	0x1007f008,
+	0xd00604b6,
+	0x04bd0001,
+	0x011917f1,
+	0xf10013f0,
+	0xfeffff14,
+	0x31f40010,
+	0x0117f010,
+	0xb63807f0,
+	0x01d00604,
+	0xf004bd00,
+/* 0x03fa: init_proc */
+	0xf19858f7,
+	0x0016b001,
+	0xf9fa0bf4,
+	0x58f0b615,
+/* 0x040b: mulu32_32_64 */
+	0xf9f20ef4,
+	0xf920f910,
+	0x9540f930,
+	0xd29510e1,
+	0xbdc4bd10,
+	0xc0edffb4,
+	0xb9301dff,
+	0x34f10234,
+	0x34b6ffff,
+	0x1045b610,
+	0xbb00c3bb,
+	0xe2ff01b4,
+	0x0234b930,
+	0xffff34f1,
+	0xb61034b6,
+	0xc3bb1045,
+	0x01b4bb00,
+	0xbb3012ff,
+	0x40fc00b3,
+	0x20fc30fc,
+	0x00f810fc,
+/* 0x045c: host_send */
+	0x04b017f1,
+	0xcf0614b6,
+	0x27f10011,
+	0x24b604a0,
+	0x0022cf06,
+	0xf40612b8,
+	0x1ec4320b,
+	0x04ee9407,
+	0x0270e0b7,
+	0x9803eb98,
+	0xed9802ec,
+	0x00ee9801,
+	0x033621f5,
+	0xc40110b6,
+	0x07f10f1e,
+	0x04b604b0,
+	0x000ed006,
+	0x0ef404bd,
+/* 0x04a5: host_send_done */
+/* 0x04a7: host_recv */
+	0xf100f8ba,
+	0xf14e4917,
+	0xb8525413,
+	0x0bf406e1,
+/* 0x04b5: host_recv_wait */
+	0xcc17f1aa,
+	0x0614b604,
+	0xf10011cf,
+	0xb604c827,
+	0x22cf0624,
+	0x0816f000,
+	0xf40612b8,
+	0x23c4e60b,
+	0x0434b607,
+	0x02f030b7,
+	0x80033b80,
+	0x3d80023c,
+	0x003e8001,
+	0xf00120b6,
+	0x07f10f24,
+	0x04b604c8,
+	0x0002d006,
+	0x27f004bd,
+	0x0007f040,
+	0xd00604b6,
+	0x04bd0002,
+/* 0x050a: host_init */
+	0x17f100f8,
+	0x14b60080,
+	0x7015f110,
+	0xd007f102,
+	0x0604b604,
+	0xbd0001d0,
+	0x8017f104,
+	0x1014b600,
+	0x02f015f1,
+	0x04dc07f1,
+	0xd00604b6,
+	0x04bd0001,
+	0xf10117f0,
+	0xb604c407,
+	0x01d00604,
+	0xf804bd00,
+/* 0x0549: memx_func_enter */
+	0x2067f100,
+	0x5d77f116,
+	0xff73f1f5,
 	0x026eb9ff,
 	0xb90421f4,
 	0x87fd02d8,
 	0xf960f904,
 	0xfcd0fc80,
-	0x3f21f4e0,
-	0x26f067f1,
+	0x4021f4e0,
+	0xfffe77f1,
+	0xffff73f1,
 	0xf4026eb9,
 	0xd8b90421,
 	0x0487fd02,
 	0x80f960f9,
 	0xe0fcd0fc,
-	0xf03f21f4,
+	0xf14021f4,
+	0xb926f067,
+	0x21f4026e,
+	0x02d8b904,
+	0xf90487fd,
+	0xfc80f960,
+	0xf4e0fcd0,
+	0x67f04021,
+	0xe007f104,
+	0x0604b607,
+	0xbd0006d0,
+/* 0x05b5: memx_func_enter_wait */
+	0xc067f104,
+	0x0664b607,
+	0xf00066cf,
+	0x0bf40464,
+	0x2c67f0f3,
+	0xcf0664b6,
+	0x06800066,
+/* 0x05d3: memx_func_leave */
+	0xf000f8f1,
+	0x64b62c67,
+	0x0066cf06,
+	0xf0f20680,
 	0x07f10467,
-	0x04b607e0,
+	0x04b607e4,
 	0x0006d006,
-/* 0x05b2: memx_func_enter_wait */
+/* 0x05ee: memx_func_leave_wait */
 	0x67f104bd,
 	0x64b607c0,
 	0x0066cf06,
 	0xf40464f0,
-	0x67f0f30b,
-	0x0664b62c,
-	0x800066cf,
-	0x00f8f106,
-/* 0x05d0: memx_func_leave */
-	0xb62c67f0,
-	0x66cf0664,
-	0xf2068000,
-	0xf10467f0,
-	0xb607e407,
-	0x06d00604,
-/* 0x05eb: memx_func_leave_wait */
-	0xf104bd00,
-	0xb607c067,
-	0x66cf0664,
-	0x0464f000,
-	0xf1f31bf4,
-	0xf126f067,
-	0xf0000177,
+	0x67f1f31b,
+	0x77f126f0,
+	0x73f00001,
+	0x026eb900,
+	0xb90421f4,
+	0x87fd02d8,
+	0xf960f905,
+	0xfcd0fc80,
+	0x4021f4e0,
+	0x162067f1,
+	0xf4026eb9,
+	0xd8b90421,
+	0x0587fd02,
+	0x80f960f9,
+	0xe0fcd0fc,
+	0xf14021f4,
+	0xf00aa277,
 	0x6eb90073,
 	0x0421f402,
 	0xfd02d8b9,
 	0x60f90587,
 	0xd0fc80f9,
 	0x21f4e0fc,
-	0x2067f13f,
-	0x026eb916,
-	0xb90421f4,
-	0x87fd02d8,
-	0xf960f905,
-	0xfcd0fc80,
-	0x3f21f4e0,
-	0x0aa277f1,
-	0xb90073f0,
-	0x21f4026e,
-	0x02d8b904,
-	0xf90587fd,
-	0xfc80f960,
-	0xf4e0fcd0,
-	0x00f83f21,
-/* 0x0658: memx_func_wait_vblank */
-	0xb0001698,
-	0x0bf40066,
-	0x0166b013,
-	0xf4060bf4,
-/* 0x066a: memx_func_wait_vblank_head1 */
-	0x77f12e0e,
-	0x0ef40020,
-/* 0x0671: memx_func_wait_vblank_head0 */
-	0x0877f107,
-/* 0x0675: memx_func_wait_vblank_0 */
-	0xc467f100,
-	0x0664b607,
-	0xfd0066cf,
-	0x1bf40467,
-/* 0x0685: memx_func_wait_vblank_1 */
-	0xc467f1f3,
-	0x0664b607,
-	0xfd0066cf,
-	0x0bf40467,
-/* 0x0695: memx_func_wait_vblank_fini */
-	0x0410b6f3,
-/* 0x069a: memx_func_wr32 */
-	0x169800f8,
-	0x01159800,
-	0xf90810b6,
-	0xfc50f960,
-	0xf4e0fcd0,
-	0x42b63f21,
-	0xe91bf402,
-/* 0x06b6: memx_func_wait */
-	0x87f000f8,
-	0x0684b62c,
-	0x980088cf,
-	0x1d98001e,
-	0x021c9801,
-	0xb6031b98,
-	0x21f41010,
-/* 0x06d3: memx_func_delay */
-	0x9800f8a4,
-	0x10b6001e,
-	0x7f21f404,
-/* 0x06de: memx_func_train */
-	0x00f800f8,
-/* 0x06e0: memx_exec */
-	0xd0f9e0f9,
-	0xb902c1b9,
-/* 0x06ea: memx_exec_next */
-	0x139802b2,
+/* 0x065b: memx_func_wait_vblank */
+	0x9800f840,
+	0x66b00016,
+	0x130bf400,
+	0xf40166b0,
+	0x0ef4060b,
+/* 0x066d: memx_func_wait_vblank_head1 */
+	0x2077f12e,
+	0x070ef400,
+/* 0x0674: memx_func_wait_vblank_head0 */
+	0x000877f1,
+/* 0x0678: memx_func_wait_vblank_0 */
+	0x07c467f1,
+	0xcf0664b6,
+	0x67fd0066,
+	0xf31bf404,
+/* 0x0688: memx_func_wait_vblank_1 */
+	0x07c467f1,
+	0xcf0664b6,
+	0x67fd0066,
+	0xf30bf404,
+/* 0x0698: memx_func_wait_vblank_fini */
+	0xf80410b6,
+/* 0x069d: memx_func_wr32 */
+	0x00169800,
+	0xb6011598,
+	0x60f90810,
+	0xd0fc50f9,
+	0x21f4e0fc,
+	0x0242b640,
+	0xf8e91bf4,
+/* 0x06b9: memx_func_wait */
+	0x2c87f000,
+	0xcf0684b6,
+	0x1e980088,
+	0x011d9800,
+	0x98021c98,
+	0x10b6031b,
+	0xa321f410,
+/* 0x06d6: memx_func_delay */
+	0x1e9800f8,
 	0x0410b600,
-	0x01f034e7,
-	0x01e033e7,
-	0xf00132b6,
-	0x35980c30,
-	0xb855f9de,
-	0x1ef40612,
-	0xf10b98e4,
-	0xbbf20c98,
-	0xb7f102cb,
-	0xb4b607c4,
-	0x00bbcf06,
-	0xe0fcd0fc,
-	0x033721f5,
-/* 0x0726: memx_info */
-	0xc67000f8,
-	0x0e0bf401,
-/* 0x072c: memx_info_data */
-	0x03ccc7f1,
-	0x0800b7f1,
-/* 0x0737: memx_info_train */
-	0xf10b0ef4,
-	0xf10bccc7,
-/* 0x073f: memx_info_send */
-	0xf50100b7,
-	0xf8033721,
-/* 0x0745: memx_recv */
-	0x01d6b000,
-	0xb0980bf4,
-	0x0bf400d6,
-/* 0x0753: memx_init */
-	0xf800f8d8,
-/* 0x0755: perf_recv */
-/* 0x0757: perf_init */
-	0xf800f800,
-/* 0x0759: i2c_drive_scl */
-	0x0036b000,
-	0xf1110bf4,
-	0xb607e007,
-	0x01d00604,
-	0xf804bd00,
-/* 0x076d: i2c_drive_scl_lo */
-	0xe407f100,
-	0x0604b607,
-	0xbd0001d0,
-/* 0x077b: i2c_drive_sda */
-	0xb000f804,
-	0x0bf40036,
-	0xe007f111,
-	0x0604b607,
-	0xbd0002d0,
-/* 0x078f: i2c_drive_sda_lo */
-	0xf100f804,
-	0xb607e407,
-	0x02d00604,
-	0xf804bd00,
-/* 0x079d: i2c_sense_scl */
-	0x0132f400,
-	0x07c437f1,
-	0xcf0634b6,
-	0x31fd0033,
-	0x060bf404,
-/* 0x07b3: i2c_sense_scl_done */
-	0xf80131f4,
-/* 0x07b5: i2c_sense_sda */
-	0x0132f400,
-	0x07c437f1,
-	0xcf0634b6,
-	0x32fd0033,
-	0x060bf404,
-/* 0x07cb: i2c_sense_sda_done */
-	0xf80131f4,
-/* 0x07cd: i2c_raise_scl */
-	0xf140f900,
-	0xf0089847,
-	0x21f50137,
-/* 0x07da: i2c_raise_scl_wait */
-	0xe7f10759,
-	0x21f403e8,
-	0x9d21f57f,
-	0x0901f407,
-	0xf40142b6,
-/* 0x07ee: i2c_raise_scl_done */
-	0x40fcef1b,
-/* 0x07f2: i2c_start */
-	0x21f500f8,
-	0x11f4079d,
-	0xb521f50d,
-	0x0611f407,
-/* 0x0803: i2c_start_rep */
-	0xf0300ef4,
+	0xf87e21f4,
+/* 0x06e1: memx_func_train */
+/* 0x06e3: memx_exec */
+	0xf900f800,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x06ed: memx_exec_next */
+	0x00139802,
+	0xe70410b6,
+	0xe701f034,
+	0xb601e033,
+	0x30f00132,
+	0xde35980c,
+	0x12b855f9,
+	0xe41ef406,
+	0x98f10b98,
+	0xcbbbf20c,
+	0xc4b7f102,
+	0x06b4b607,
+	0xfc00bbcf,
+	0xf5e0fcd0,
+	0xf8033621,
+/* 0x0729: memx_info */
+	0x01c67000,
+/* 0x072f: memx_info_data */
+	0xf10e0bf4,
+	0xf103ccc7,
+	0xf40800b7,
+/* 0x073a: memx_info_train */
+	0xc7f10b0e,
+	0xb7f10bcc,
+/* 0x0742: memx_info_send */
+	0x21f50100,
+	0x00f80336,
+/* 0x0748: memx_recv */
+	0xf401d6b0,
+	0xd6b0980b,
+	0xd80bf400,
+/* 0x0756: memx_init */
+	0x00f800f8,
+/* 0x0758: perf_recv */
+/* 0x075a: perf_init */
+	0x00f800f8,
+/* 0x075c: i2c_drive_scl */
+	0xf40036b0,
+	0x07f1110b,
+	0x04b607e0,
+	0x0001d006,
+	0x00f804bd,
+/* 0x0770: i2c_drive_scl_lo */
+	0x07e407f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x077e: i2c_drive_sda */
+	0x36b000f8,
+	0x110bf400,
+	0x07e007f1,
+	0xd00604b6,
+	0x04bd0002,
+/* 0x0792: i2c_drive_sda_lo */
+	0x07f100f8,
+	0x04b607e4,
+	0x0002d006,
+	0x00f804bd,
+/* 0x07a0: i2c_sense_scl */
+	0xf10132f4,
+	0xb607c437,
+	0x33cf0634,
+	0x0431fd00,
+	0xf4060bf4,
+/* 0x07b6: i2c_sense_scl_done */
+	0x00f80131,
+/* 0x07b8: i2c_sense_sda */
+	0xf10132f4,
+	0xb607c437,
+	0x33cf0634,
+	0x0432fd00,
+	0xf4060bf4,
+/* 0x07ce: i2c_sense_sda_done */
+	0x00f80131,
+/* 0x07d0: i2c_raise_scl */
+	0x47f140f9,
+	0x37f00898,
+	0x5c21f501,
+/* 0x07dd: i2c_raise_scl_wait */
+	0xe8e7f107,
+	0x7e21f403,
+	0x07a021f5,
+	0xb60901f4,
+	0x1bf40142,
+/* 0x07f1: i2c_raise_scl_done */
+	0xf840fcef,
+/* 0x07f5: i2c_start */
+	0xa021f500,
+	0x0d11f407,
+	0x07b821f5,
+	0xf40611f4,
+/* 0x0806: i2c_start_rep */
+	0x37f0300e,
+	0x5c21f500,
+	0x0137f007,
+	0x077e21f5,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xd021f550,
+	0x0464b607,
+/* 0x0833: i2c_start_send */
+	0xf01f11f4,
 	0x21f50037,
-	0x37f00759,
-	0x7b21f501,
-	0x0076bb07,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b607cd,
-	0x1f11f404,
-/* 0x0830: i2c_start_send */
-	0xf50037f0,
-	0xf1077b21,
-	0xf41388e7,
-	0x37f07f21,
-	0x5921f500,
-	0x88e7f107,
-	0x7f21f413,
-/* 0x084c: i2c_start_out */
-/* 0x084e: i2c_stop */
-	0x37f000f8,
-	0x5921f500,
-	0x0037f007,
-	0x077b21f5,
-	0x03e8e7f1,
-	0xf07f21f4,
-	0x21f50137,
-	0xe7f10759,
+	0xe7f1077e,
 	0x21f41388,
-	0x0137f07f,
-	0x077b21f5,
+	0x0037f07e,
+	0x075c21f5,
 	0x1388e7f1,
-	0xf87f21f4,
-/* 0x0881: i2c_bitw */
-	0x7b21f500,
-	0xe8e7f107,
-	0x7f21f403,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xcd21f550,
-	0x0464b607,
-	0xf11811f4,
-	0xf41388e7,
-	0x37f07f21,
-	0x5921f500,
+/* 0x084f: i2c_start_out */
+	0xf87e21f4,
+/* 0x0851: i2c_stop */
+	0x0037f000,
+	0x075c21f5,
+	0xf50037f0,
+	0xf1077e21,
+	0xf403e8e7,
+	0x37f07e21,
+	0x5c21f501,
 	0x88e7f107,
-	0x7f21f413,
-/* 0x08c0: i2c_bitw_out */
-/* 0x08c2: i2c_bitr */
-	0x37f000f8,
-	0x7b21f501,
-	0xe8e7f107,
-	0x7f21f403,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xcd21f550,
-	0x0464b607,
-	0xf51b11f4,
-	0xf007b521,
-	0x21f50037,
-	0xe7f10759,
+	0x7e21f413,
+	0xf50137f0,
+	0xf1077e21,
+	0xf41388e7,
+	0x00f87e21,
+/* 0x0884: i2c_bitw */
+	0x077e21f5,
+	0x03e8e7f1,
+	0xbb7e21f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07d021f5,
+	0xf40464b6,
+	0xe7f11811,
 	0x21f41388,
-	0x013cf07f,
-/* 0x0907: i2c_bitr_done */
-	0xf80131f4,
-/* 0x0909: i2c_get_byte */
-	0x0057f000,
-/* 0x090f: i2c_get_byte_next */
-	0xb60847f0,
-	0x76bb0154,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb608c221,
-	0x11f40464,
-	0x0553fd2b,
-	0xf40142b6,
-	0x37f0d81b,
+	0x0037f07e,
+	0x075c21f5,
+	0x1388e7f1,
+/* 0x08c3: i2c_bitw_out */
+	0xf87e21f4,
+/* 0x08c5: i2c_bitr */
+	0x0137f000,
+	0x077e21f5,
+	0x03e8e7f1,
+	0xbb7e21f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07d021f5,
+	0xf40464b6,
+	0x21f51b11,
+	0x37f007b8,
+	0x5c21f500,
+	0x88e7f107,
+	0x7e21f413,
+	0xf4013cf0,
+/* 0x090a: i2c_bitr_done */
+	0x00f80131,
+/* 0x090c: i2c_get_byte */
+	0xf00057f0,
+/* 0x0912: i2c_get_byte_next */
+	0x54b60847,
 	0x0076bb01,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b60881,
-/* 0x0959: i2c_get_byte_done */
-/* 0x095b: i2c_put_byte */
-	0xf000f804,
-/* 0x095e: i2c_put_byte_next */
-	0x42b60847,
-	0x3854ff01,
+	0x64b608c5,
+	0x2b11f404,
+	0xb60553fd,
+	0x1bf40142,
+	0x0137f0d8,
 	0xb60076bb,
 	0x50f90465,
 	0xbb046594,
 	0x50bd0256,
 	0xfc0475fd,
-	0x8121f550,
+	0x8421f550,
 	0x0464b608,
-	0xb03411f4,
-	0x1bf40046,
-	0x0076bbd8,
+/* 0x095c: i2c_get_byte_done */
+/* 0x095e: i2c_put_byte */
+	0x47f000f8,
+/* 0x0961: i2c_put_byte_next */
+	0x0142b608,
+	0xbb3854ff,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x088421f5,
+	0xf40464b6,
+	0x46b03411,
+	0xd81bf400,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xc521f550,
+	0x0464b608,
+	0xbb0f11f4,
+	0x36b00076,
+	0x061bf401,
+/* 0x09b7: i2c_put_byte_done */
+	0xf80132f4,
+/* 0x09b9: i2c_addr */
+	0x0076bb00,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b608c2,
-	0x0f11f404,
-	0xb00076bb,
-	0x1bf40136,
-	0x0132f406,
-/* 0x09b4: i2c_put_byte_done */
-/* 0x09b6: i2c_addr */
-	0x76bb00f8,
+	0x64b607f5,
+	0x2911f404,
+	0x012ec3e7,
+	0xfd0134b6,
+	0x76bb0553,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb607f221,
-	0x11f40464,
-	0x2ec3e729,
-	0x0134b601,
-	0xbb0553fd,
+	0xb6095e21,
+/* 0x09fe: i2c_addr_done */
+	0x00f80464,
+/* 0x0a00: i2c_acquire_addr */
+	0xb6f8cec7,
+	0xe0b702e4,
+	0xee980d1c,
+/* 0x0a0f: i2c_acquire */
+	0xf500f800,
+	0xf40a0021,
+	0xd9f00421,
+	0x4021f403,
+/* 0x0a1e: i2c_release */
+	0x21f500f8,
+	0x21f40a00,
+	0x03daf004,
+	0xf84021f4,
+/* 0x0a2d: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x3a1ff528,
+	0xf413a001,
+	0x0032980c,
+	0x0ccc13a0,
+	0xf4003198,
+	0xd0f90231,
+	0xd0f9e0f9,
+	0x000067f1,
+	0x100063f1,
+	0xbb016792,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x095b21f5,
-/* 0x09fb: i2c_addr_done */
-	0xf80464b6,
-/* 0x09fd: i2c_acquire_addr */
-	0xf8cec700,
-	0xb702e4b6,
-	0x980d1ce0,
-	0x00f800ee,
-/* 0x0a0c: i2c_acquire */
-	0x09fd21f5,
-	0xf00421f4,
-	0x21f403d9,
-/* 0x0a1b: i2c_release */
-	0xf500f83f,
-	0xf409fd21,
-	0xdaf00421,
-	0x3f21f403,
-/* 0x0a2a: i2c_recv */
-	0x32f400f8,
-	0xf8c1c701,
-	0xb00214b6,
-	0x1ff52816,
-	0x13a0013a,
-	0x32980cf4,
-	0xcc13a000,
-	0x0031980c,
-	0xf90231f4,
-	0xf9e0f9d0,
-	0x0067f1d0,
-	0x0063f100,
-	0x01679210,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x0c21f550,
-	0x0464b60a,
-	0xd6b0d0fc,
-	0xb31bf500,
-	0x0057f000,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xb621f550,
-	0x0464b609,
-	0x00d011f5,
-	0xbbe0c5c7,
+	0x0a0f21f5,
+	0xfc0464b6,
+	0x00d6b0d0,
+	0x00b31bf5,
+	0xbb0057f0,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x095b21f5,
+	0x09b921f5,
 	0xf50464b6,
-	0xf000ad11,
-	0x76bb0157,
+	0xc700d011,
+	0x76bbe0c5,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb609b621,
+	0xb6095e21,
 	0x11f50464,
-	0x76bb008a,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb6090921,
-	0x11f40464,
-	0xe05bcb6a,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x4e21f550,
-	0x0464b608,
-	0xbd025bb9,
-	0x430ef474,
-/* 0x0b30: i2c_recv_not_rd08 */
-	0xf401d6b0,
-	0x57f03d1b,
-	0xb621f500,
-	0x3311f409,
-	0xf5e0c5c7,
-	0xf4095b21,
-	0x57f02911,
-	0xb621f500,
-	0x1f11f409,
-	0xf5e0b5c7,
-	0xf4095b21,
-	0x21f51511,
-	0x74bd084e,
-	0xf408c5c7,
-	0x32f4091b,
-	0x030ef402,
-/* 0x0b70: i2c_recv_not_wr08 */
-/* 0x0b70: i2c_recv_done */
-	0xf5f8cec7,
-	0xfc0a1b21,
-	0xf4d0fce0,
-	0x7cb90a12,
-	0x3721f502,
-/* 0x0b85: i2c_recv_exit */
-/* 0x0b87: i2c_init */
-	0xf800f803,
-/* 0x0b89: test_recv */
-	0xd817f100,
-	0x0614b605,
-	0xb60011cf,
-	0x07f10110,
-	0x04b605d8,
-	0x0001d006,
-	0xe7f104bd,
-	0xe3f1d900,
-	0x21f5134f,
-	0x00f80257,
-/* 0x0bb0: test_init */
-	0x0800e7f1,
-	0x025721f5,
-/* 0x0bba: idle_recv */
+	0x57f000ad,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b609b9,
+	0x8a11f504,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b6090c,
+	0x6a11f404,
+	0xbbe05bcb,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x085121f5,
+	0xb90464b6,
+	0x74bd025b,
+/* 0x0b33: i2c_recv_not_rd08 */
+	0xb0430ef4,
+	0x1bf401d6,
+	0x0057f03d,
+	0x09b921f5,
+	0xc73311f4,
+	0x21f5e0c5,
+	0x11f4095e,
+	0x0057f029,
+	0x09b921f5,
+	0xc71f11f4,
+	0x21f5e0b5,
+	0x11f4095e,
+	0x5121f515,
+	0xc774bd08,
+	0x1bf408c5,
+	0x0232f409,
+/* 0x0b73: i2c_recv_not_wr08 */
+/* 0x0b73: i2c_recv_done */
+	0xc7030ef4,
+	0x21f5f8ce,
+	0xe0fc0a1e,
+	0x12f4d0fc,
+	0x027cb90a,
+	0x033621f5,
+/* 0x0b88: i2c_recv_exit */
+/* 0x0b8a: i2c_init */
 	0x00f800f8,
-/* 0x0bbc: idle */
-	0xf10031f4,
-	0xb605d417,
-	0x11cf0614,
-	0x0110b600,
-	0x05d407f1,
-	0xd00604b6,
-	0x04bd0001,
-/* 0x0bd8: idle_loop */
-	0xf45817f0,
-/* 0x0bde: idle_proc */
-/* 0x0bde: idle_proc_exec */
-	0x10f90232,
-	0xf5021eb9,
-	0xfc034021,
-	0x0911f410,
-	0xf40231f4,
-/* 0x0bf2: idle_proc_next */
-	0x10b6ef0e,
-	0x061fb858,
-	0xf4e61bf4,
-	0x28f4dd02,
-	0xbb0ef400,
-	0x00000000,
+/* 0x0b8c: test_recv */
+	0x05d817f1,
+	0xcf0614b6,
+	0x10b60011,
+	0xd807f101,
+	0x0604b605,
+	0xbd0001d0,
+	0x00e7f104,
+	0x4fe3f1d9,
+	0x5621f513,
+/* 0x0bb3: test_init */
+	0xf100f802,
+	0xf50800e7,
+	0xf8025621,
+/* 0x0bbd: idle_recv */
+/* 0x0bbf: idle */
+	0xf400f800,
+	0x17f10031,
+	0x14b605d4,
+	0x0011cf06,
+	0xf10110b6,
+	0xb605d407,
+	0x01d00604,
+/* 0x0bdb: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x0be1: idle_proc */
+/* 0x0be1: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc033f,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x0bf5: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00bb0ef4,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index 7bf6b39..2d5bdc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -24,8 +24,8 @@
 	0x00000000,
 /* 0x0058: proc_list_head */
 	0x54534f48,
-	0x00000492,
-	0x0000043b,
+	0x00000495,
+	0x0000043e,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x00000680,
-	0x00000672,
+	0x00000683,
+	0x00000675,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x00000684,
-	0x00000682,
+	0x00000687,
+	0x00000685,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x00000a9f,
-	0x00000942,
+	0x00000aa2,
+	0x00000945,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000ac2,
-	0x00000aa1,
+	0x00000ac5,
+	0x00000aa4,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000ace,
-	0x00000acc,
+	0x00000ad1,
+	0x00000acf,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -229,26 +229,26 @@
 /* 0x0370: memx_func_head */
 	0x00000001,
 	0x00000000,
-	0x000004c8,
+	0x000004cb,
 /* 0x037c: memx_func_next */
 	0x00000002,
 	0x00000000,
-	0x00000549,
+	0x0000054c,
 	0x00000003,
 	0x00000002,
-	0x000005cd,
+	0x000005d0,
 	0x00040004,
 	0x00000000,
-	0x000005e9,
+	0x000005ec,
 	0x00010005,
 	0x00000000,
-	0x00000603,
+	0x00000606,
 	0x00010006,
 	0x00000000,
-	0x000005c8,
+	0x000005cb,
 	0x00000007,
 	0x00000000,
-	0x0000060e,
+	0x00000611,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -916,821 +916,821 @@
 };
 
 uint32_t gf119_pmu_code[] = {
-	0x03420ef5,
+	0x03410ef5,
 /* 0x0004: rd32 */
 	0x07a007f1,
 	0xbd000ed0,
-	0x01d7f004,
-	0xf101d3f0,
-	0xd007ac07,
-	0x04bd000d,
-/* 0x001c: rd32_wait */
-	0x07acd7f1,
-	0xf100ddcf,
-	0xf47000d4,
-	0xd7f1f51b,
-	0xddcf07a4,
-/* 0x0033: wr32 */
-	0xf100f800,
-	0xd007a007,
-	0x04bd000e,
-	0x07a407f1,
+	0x01d7f104,
+	0x01d3f000,
+	0x07ac07f1,
 	0xbd000dd0,
-	0x02d7f004,
-	0xf0f0d5f0,
-	0x07f101d3,
-	0x0dd007ac,
-/* 0x0057: wr32_wait */
-	0xf104bd00,
-	0xcf07acd7,
-	0xd4f100dd,
-	0x1bf47000,
-/* 0x0067: nsec */
-	0xf900f8f5,
-	0xf080f990,
-	0x88cf2c87,
-/* 0x0071: nsec_loop */
-	0x2c97f000,
-	0xbb0099cf,
-	0x9eb80298,
-	0xf41ef406,
-	0x90fc80fc,
-/* 0x0086: wait */
+/* 0x001d: rd32_wait */
+	0xacd7f104,
+	0x00ddcf07,
+	0x7000d4f1,
+	0xf1f51bf4,
+	0xcf07a4d7,
+	0x00f800dd,
+/* 0x0034: wr32 */
+	0x07a007f1,
+	0xbd000ed0,
+	0xa407f104,
+	0x000dd007,
+	0xd7f104bd,
+	0xd3f000f2,
+	0xac07f101,
+	0x000dd007,
+/* 0x0056: wr32_wait */
+	0xd7f104bd,
+	0xddcf07ac,
+	0x00d4f100,
+	0xf51bf470,
+/* 0x0066: nsec */
 	0x90f900f8,
 	0x87f080f9,
 	0x0088cf2c,
-/* 0x0090: wait_loop */
-	0xf402eeb9,
-	0xdab90421,
-	0x04adfd02,
-	0xf406acb8,
-	0x97f0120b,
-	0x0099cf2c,
-	0xb80298bb,
-	0x1ef4069b,
-/* 0x00b1: wait_done */
-	0xfc80fce2,
-/* 0x00b7: intr_watchdog */
-	0x9800f890,
-	0x96b003e9,
-	0x2a0bf400,
-	0xbb9a0a98,
-	0x1cf4029a,
-	0x01d7f00f,
-	0x028121f5,
-	0x0ef494bd,
-/* 0x00d5: intr_watchdog_next_time */
-	0x9b0a9815,
-	0xf400a6b0,
-	0x9ab8090b,
-	0x061cf406,
-/* 0x00e4: intr_watchdog_next_time_set */
-/* 0x00e7: intr_watchdog_next_proc */
-	0x809b0980,
-	0xe0b603e9,
-	0x68e6b158,
-	0xc61bf402,
-/* 0x00f6: intr */
-	0x00f900f8,
-	0x80f904bd,
-	0xa0f990f9,
-	0xc0f9b0f9,
-	0xe0f9d0f9,
-	0xf7f0f0f9,
-	0x0188fe00,
-	0x87f180f9,
-	0x88cf05d0,
-	0x0180b600,
-	0x05d007f1,
-	0xbd0008d0,
-	0x0887f004,
-	0xc40088cf,
-	0x0bf40289,
-	0x9b008020,
-	0xf458e7f0,
-	0x0998b721,
-	0x0096b09b,
-	0xf00e0bf4,
-	0x09d03407,
-	0x8004bd00,
-/* 0x014e: intr_skip_watchdog */
-	0x89e49a09,
-	0x0bf40800,
-	0x8897f13c,
-	0x0099cf06,
-	0xf4029ac4,
-	0xc7f1260b,
-	0xcccf04c0,
-	0xf1c0f900,
-	0xf14f48e7,
-	0xf05453e3,
-	0x21f500d7,
-	0xc0fc02e6,
-	0x04c007f1,
-	0xbd000cd0,
-/* 0x0185: intr_subintr_skip_fifo */
-	0x8807f104,
-	0x0009d006,
-/* 0x018e: intr_skip_subintr */
-	0x97f104bd,
-	0x90bd00e0,
-	0xf00489fd,
-	0x08d00407,
-	0xfc04bd00,
-	0x0088fe80,
-	0xe0fcf0fc,
-	0xc0fcd0fc,
-	0xa0fcb0fc,
-	0x80fc90fc,
-	0x32f400fc,
-/* 0x01bb: ticks_from_ns */
-	0xf901f800,
+/* 0x0070: nsec_loop */
+	0xcf2c97f0,
+	0x98bb0099,
+	0x069eb802,
+	0xfcf41ef4,
+	0xf890fc80,
+/* 0x0085: wait */
+	0xf990f900,
+	0x2c87f080,
+/* 0x008f: wait_loop */
+	0xb90088cf,
+	0x21f402ee,
+	0x02dab904,
+	0xb804adfd,
+	0x0bf406ac,
+	0x2c97f012,
+	0xbb0099cf,
+	0x9bb80298,
+	0xe21ef406,
+/* 0x00b0: wait_done */
+	0x90fc80fc,
+/* 0x00b6: intr_watchdog */
+	0xe99800f8,
+	0x0096b003,
+	0x982a0bf4,
+	0x9abb9a0a,
+	0x0f1cf402,
+	0xf501d7f0,
+	0xbd028021,
+	0x150ef494,
+/* 0x00d4: intr_watchdog_next_time */
+	0xb09b0a98,
+	0x0bf400a6,
+	0x069ab809,
+/* 0x00e3: intr_watchdog_next_time_set */
+	0x80061cf4,
+/* 0x00e6: intr_watchdog_next_proc */
+	0xe9809b09,
+	0x58e0b603,
+	0x0268e6b1,
+	0xf8c61bf4,
+/* 0x00f5: intr */
+	0xbd00f900,
+	0xf980f904,
+	0xf9a0f990,
+	0xf9c0f9b0,
+	0xf9e0f9d0,
+	0x00f7f0f0,
+	0xf90188fe,
+	0xd087f180,
+	0x0088cf05,
+	0xf10180b6,
+	0xd005d007,
+	0x04bd0008,
+	0xcf0887f0,
+	0x89c40088,
+	0x200bf402,
+	0xf09b0080,
+	0x21f458e7,
+	0x9b0998b6,
+	0xf40096b0,
+	0x07f00e0b,
+	0x0009d034,
+	0x098004bd,
+/* 0x014d: intr_skip_watchdog */
+	0x0089e49a,
+	0x3c0bf408,
+	0x068897f1,
+	0xc40099cf,
+	0x0bf4029a,
+	0xc0c7f126,
+	0x00cccf04,
+	0xe7f1c0f9,
+	0xe3f14f48,
+	0xd7f05453,
+	0xe521f500,
+	0xf1c0fc02,
+	0xd004c007,
+	0x04bd000c,
+/* 0x0184: intr_subintr_skip_fifo */
+	0x068807f1,
+	0xbd0009d0,
+/* 0x018d: intr_skip_subintr */
+	0xe097f104,
+	0xfd90bd00,
+	0x07f00489,
+	0x0008d004,
+	0x80fc04bd,
+	0xfc0088fe,
+	0xfce0fcf0,
+	0xfcc0fcd0,
+	0xfca0fcb0,
+	0xfc80fc90,
+	0x0032f400,
+/* 0x01ba: ticks_from_ns */
+	0xc0f901f8,
+	0xd7f1b0f9,
+	0xd3f00144,
+	0xab21f500,
+	0xe8ccec03,
+	0x00b4b003,
+	0xec120bf4,
+	0xf103e8ee,
+	0xf00144d7,
+	0x21f500d3,
+/* 0x01e2: ticks_from_ns_quit */
+	0xceb903ab,
+	0xfcb0fc02,
+/* 0x01eb: ticks_from_us */
+	0xf900f8c0,
 	0xf1b0f9c0,
 	0xf00144d7,
 	0x21f500d3,
-	0xccec03a8,
-	0xb4b003e8,
-	0x120bf400,
-	0x03e8eeec,
-	0x0144d7f1,
-	0xf500d3f0,
-/* 0x01e3: ticks_from_ns_quit */
-	0xb903a821,
-	0xb0fc02ce,
-	0x00f8c0fc,
-/* 0x01ec: ticks_from_us */
-	0xb0f9c0f9,
-	0x0144d7f1,
-	0xf500d3f0,
-	0xb903a821,
-	0xb4b002ce,
-	0x050bf400,
-/* 0x0206: ticks_from_us_quit */
-	0xb0fce4bd,
-	0x00f8c0fc,
-/* 0x020c: ticks_to_us */
-	0x0144d7f1,
-	0xff00d3f0,
-	0x00f8eced,
-/* 0x0218: timer */
-	0x80f990f9,
-	0x981032f4,
-	0x86b003f8,
-	0x531cf400,
-	0x07f084bd,
-	0x0008d038,
-	0x87f004bd,
-	0x0088cf34,
-	0xbb9a0998,
-	0xe9bb0298,
-	0x03fe8000,
-	0xcf0887f0,
-	0x84f00088,
-	0x201bf402,
-	0xcf3487f0,
-	0xe0b80088,
-	0x090bf406,
-	0xf406e8b8,
-/* 0x0262: timer_reset */
-	0x07f00e1c,
-	0x000ed034,
-	0x0e8004bd,
-/* 0x026d: timer_enable */
-	0x0187f09a,
-	0xd03807f0,
-	0x04bd0008,
-/* 0x0278: timer_done */
-	0xfc1031f4,
-	0xf890fc80,
-/* 0x0281: send_proc */
-	0xf980f900,
-	0x05e89890,
-	0xf004e998,
-	0x89b80486,
-	0x2a0bf406,
-	0x940398c4,
-	0x80b60488,
-	0x008ebb18,
-	0x8000fa98,
-	0x8d80008a,
-	0x028c8001,
-	0xb6038b80,
-	0x94f00190,
-	0x04e98007,
-/* 0x02bb: send_done */
-	0xfc0231f4,
-	0xf880fc90,
-/* 0x02c1: find */
-	0xf080f900,
-	0x31f45887,
-/* 0x02c9: find_loop */
-	0x008a9801,
-	0xf406aeb8,
-	0x80b6100b,
-	0x6886b158,
-	0xf01bf402,
-/* 0x02df: find_done */
-	0xb90132f4,
-	0x80fc028e,
-/* 0x02e6: send */
-	0x21f500f8,
-	0x01f402c1,
-/* 0x02ef: recv */
-	0xf900f897,
-	0x9880f990,
-	0xe99805e8,
-	0x0132f404,
-	0xf40689b8,
-	0x89c43d0b,
-	0x0180b603,
-	0x800784f0,
-	0xea9805e8,
-	0xfef0f902,
-	0xf0f9018f,
-	0x9402efb9,
-	0xe9bb0499,
-	0x18e0b600,
-	0x9803eb98,
-	0xed9802ec,
-	0x00ee9801,
-	0xf0fca5f9,
-	0xf400f8fe,
-	0xf0fc0131,
-/* 0x033c: recv_done */
-	0x90fc80fc,
-/* 0x0342: init */
-	0x17f100f8,
-	0x11cf0108,
-	0x0911e700,
-	0x0814b601,
-	0xf10014fe,
-	0xf000e017,
-	0x07f00013,
-	0x0001d01c,
-	0x17f004bd,
-	0x1407f0ff,
-	0xbd0001d0,
-	0x0217f004,
-	0x080015f1,
-	0xd01007f0,
-	0x04bd0001,
-	0x00f617f1,
-	0xfe0013f0,
-	0x31f40010,
-	0x0117f010,
-	0xd03807f0,
-	0x04bd0001,
-/* 0x0397: init_proc */
-	0x9858f7f0,
-	0x16b001f1,
-	0xfa0bf400,
-	0xf0b615f9,
-	0xf20ef458,
-/* 0x03a8: mulu32_32_64 */
-	0x20f910f9,
-	0x40f930f9,
-	0x9510e195,
-	0xc4bd10d2,
-	0xedffb4bd,
-	0x301dffc0,
-	0xf10234b9,
-	0xb6ffff34,
-	0x45b61034,
-	0x00c3bb10,
-	0xff01b4bb,
-	0x34b930e2,
-	0xff34f102,
-	0x1034b6ff,
-	0xbb1045b6,
-	0xb4bb00c3,
-	0x3012ff01,
-	0xfc00b3bb,
-	0xfc30fc40,
-	0xf810fc20,
-/* 0x03f9: host_send */
-	0xb017f100,
-	0x0011cf04,
-	0x04a027f1,
-	0xb80022cf,
-	0x0bf40612,
-	0x071ec42f,
-	0xb704ee94,
-	0x980270e0,
+	0xceb903ab,
+	0x00b4b002,
+	0xbd050bf4,
+/* 0x0205: ticks_from_us_quit */
+	0xfcb0fce4,
+/* 0x020b: ticks_to_us */
+	0xf100f8c0,
+	0xf00144d7,
+	0xedff00d3,
+/* 0x0217: timer */
+	0xf900f8ec,
+	0xf480f990,
+	0xf8981032,
+	0x0086b003,
+	0xbd531cf4,
+	0x3807f084,
+	0xbd0008d0,
+	0x3487f004,
+	0x980088cf,
+	0x98bb9a09,
+	0x00e9bb02,
+	0xf003fe80,
+	0x88cf0887,
+	0x0284f000,
+	0xf0201bf4,
+	0x88cf3487,
+	0x06e0b800,
+	0xb8090bf4,
+	0x1cf406e8,
+/* 0x0261: timer_reset */
+	0x3407f00e,
+	0xbd000ed0,
+	0x9a0e8004,
+/* 0x026c: timer_enable */
+	0xf00187f0,
+	0x08d03807,
+/* 0x0277: timer_done */
+	0xf404bd00,
+	0x80fc1031,
+	0x00f890fc,
+/* 0x0280: send_proc */
+	0x90f980f9,
+	0x9805e898,
+	0x86f004e9,
+	0x0689b804,
+	0xc42a0bf4,
+	0x88940398,
+	0x1880b604,
+	0x98008ebb,
+	0x8a8000fa,
+	0x018d8000,
+	0x80028c80,
+	0x90b6038b,
+	0x0794f001,
+	0xf404e980,
+/* 0x02ba: send_done */
+	0x90fc0231,
+	0x00f880fc,
+/* 0x02c0: find */
+	0x87f080f9,
+	0x0131f458,
+/* 0x02c8: find_loop */
+	0xb8008a98,
+	0x0bf406ae,
+	0x5880b610,
+	0x026886b1,
+	0xf4f01bf4,
+/* 0x02de: find_done */
+	0x8eb90132,
+	0xf880fc02,
+/* 0x02e5: send */
+	0xc021f500,
+	0x9701f402,
+/* 0x02ee: recv */
+	0x90f900f8,
+	0xe89880f9,
+	0x04e99805,
+	0xb80132f4,
+	0x0bf40689,
+	0x0389c43d,
+	0xf00180b6,
+	0xe8800784,
+	0x02ea9805,
+	0x8ffef0f9,
+	0xb9f0f901,
+	0x999402ef,
+	0x00e9bb04,
+	0x9818e0b6,
 	0xec9803eb,
 	0x01ed9802,
-	0xf500ee98,
-	0xb602e621,
-	0x1ec40110,
-	0xb007f10f,
-	0x000ed004,
-	0x0ef404bd,
-/* 0x0439: host_send_done */
-/* 0x043b: host_recv */
-	0xf100f8c3,
-	0xf14e4917,
-	0xb8525413,
-	0x0bf406e1,
-/* 0x0449: host_recv_wait */
-	0xcc17f1b3,
-	0x0011cf04,
-	0x04c827f1,
-	0xf00022cf,
-	0x12b80816,
-	0xec0bf406,
-	0xb60723c4,
-	0x30b70434,
-	0x3b8002f0,
-	0x023c8003,
-	0x80013d80,
-	0x20b6003e,
-	0x0f24f001,
-	0x04c807f1,
-	0xbd0002d0,
-	0x4027f004,
-	0xd00007f0,
-	0x04bd0002,
-/* 0x0492: host_init */
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x033b: recv_done */
+	0xfcf0fc01,
+	0xf890fc80,
+/* 0x0341: init */
+	0x0817f100,
+	0x0011cf01,
+	0x010911e7,
+	0xfe0814b6,
+	0x17f10014,
+	0x13f000e0,
+	0x1c07f000,
+	0xbd0001d0,
+	0xff17f004,
+	0xd01407f0,
+	0x04bd0001,
+	0xf10217f0,
+	0xf0080015,
+	0x01d01007,
+	0xf104bd00,
+	0xf000f517,
+	0x14f10013,
+	0x10feffff,
+	0x1031f400,
+	0xf00117f0,
+	0x01d03807,
+	0xf004bd00,
+/* 0x039a: init_proc */
+	0xf19858f7,
+	0x0016b001,
+	0xf9fa0bf4,
+	0x58f0b615,
+/* 0x03ab: mulu32_32_64 */
+	0xf9f20ef4,
+	0xf920f910,
+	0x9540f930,
+	0xd29510e1,
+	0xbdc4bd10,
+	0xc0edffb4,
+	0xb9301dff,
+	0x34f10234,
+	0x34b6ffff,
+	0x1045b610,
+	0xbb00c3bb,
+	0xe2ff01b4,
+	0x0234b930,
+	0xffff34f1,
+	0xb61034b6,
+	0xc3bb1045,
+	0x01b4bb00,
+	0xbb3012ff,
+	0x40fc00b3,
+	0x20fc30fc,
+	0x00f810fc,
+/* 0x03fc: host_send */
+	0x04b017f1,
+	0xf10011cf,
+	0xcf04a027,
+	0x12b80022,
+	0x2f0bf406,
+	0x94071ec4,
+	0xe0b704ee,
+	0xeb980270,
+	0x02ec9803,
+	0x9801ed98,
+	0x21f500ee,
+	0x10b602e5,
+	0x0f1ec401,
+	0x04b007f1,
+	0xbd000ed0,
+	0xc30ef404,
+/* 0x043c: host_send_done */
+/* 0x043e: host_recv */
 	0x17f100f8,
-	0x14b60080,
-	0x7015f110,
-	0xd007f102,
-	0x0001d004,
-	0x17f104bd,
-	0x14b60080,
-	0xf015f110,
-	0xdc07f102,
-	0x0001d004,
-	0x17f004bd,
-	0xc407f101,
-	0x0001d004,
-	0x00f804bd,
-/* 0x04c8: memx_func_enter */
-	0x162067f1,
-	0xf55d77f1,
-	0xffff73f1,
-	0xf4026eb9,
-	0xd8b90421,
-	0x0487fd02,
-	0x80f960f9,
-	0xe0fcd0fc,
-	0xf13321f4,
-	0xf1fffe77,
+	0x13f14e49,
+	0xe1b85254,
+	0xb30bf406,
+/* 0x044c: host_recv_wait */
+	0x04cc17f1,
+	0xf10011cf,
+	0xcf04c827,
+	0x16f00022,
+	0x0612b808,
+	0xc4ec0bf4,
+	0x34b60723,
+	0xf030b704,
+	0x033b8002,
+	0x80023c80,
+	0x3e80013d,
+	0x0120b600,
+	0xf10f24f0,
+	0xd004c807,
+	0x04bd0002,
+	0xf04027f0,
+	0x02d00007,
+	0xf804bd00,
+/* 0x0495: host_init */
+	0x8017f100,
+	0x1014b600,
+	0x027015f1,
+	0x04d007f1,
+	0xbd0001d0,
+	0x8017f104,
+	0x1014b600,
+	0x02f015f1,
+	0x04dc07f1,
+	0xbd0001d0,
+	0x0117f004,
+	0x04c407f1,
+	0xbd0001d0,
+/* 0x04cb: memx_func_enter */
+	0xf100f804,
+	0xf1162067,
+	0xf1f55d77,
 	0xb9ffff73,
 	0x21f4026e,
 	0x02d8b904,
 	0xf90487fd,
 	0xfc80f960,
 	0xf4e0fcd0,
-	0x67f13321,
-	0x6eb926f0,
+	0x77f13421,
+	0x73f1fffe,
+	0x6eb9ffff,
 	0x0421f402,
 	0xfd02d8b9,
 	0x60f90487,
 	0xd0fc80f9,
 	0x21f4e0fc,
-	0x0467f033,
-	0x07e007f1,
+	0xf067f134,
+	0x026eb926,
+	0xb90421f4,
+	0x87fd02d8,
+	0xf960f904,
+	0xfcd0fc80,
+	0x3421f4e0,
+	0xf10467f0,
+	0xd007e007,
+	0x04bd0006,
+/* 0x0534: memx_func_enter_wait */
+	0x07c067f1,
+	0xf00066cf,
+	0x0bf40464,
+	0x2c67f0f6,
+	0x800066cf,
+	0x00f8f106,
+/* 0x054c: memx_func_leave */
+	0xcf2c67f0,
+	0x06800066,
+	0x0467f0f2,
+	0x07e407f1,
 	0xbd0006d0,
-/* 0x0531: memx_func_enter_wait */
+/* 0x0561: memx_func_leave_wait */
 	0xc067f104,
 	0x0066cf07,
 	0xf40464f0,
-	0x67f0f60b,
-	0x0066cf2c,
-	0xf8f10680,
-/* 0x0549: memx_func_leave */
-	0x2c67f000,
-	0x800066cf,
-	0x67f0f206,
-	0xe407f104,
-	0x0006d007,
-/* 0x055e: memx_func_leave_wait */
-	0x67f104bd,
-	0x66cf07c0,
-	0x0464f000,
-	0xf1f61bf4,
-	0xf126f067,
-	0xf0000177,
+	0x67f1f61b,
+	0x77f126f0,
+	0x73f00001,
+	0x026eb900,
+	0xb90421f4,
+	0x87fd02d8,
+	0xf960f905,
+	0xfcd0fc80,
+	0x3421f4e0,
+	0x162067f1,
+	0xf4026eb9,
+	0xd8b90421,
+	0x0587fd02,
+	0x80f960f9,
+	0xe0fcd0fc,
+	0xf13421f4,
+	0xf00aa277,
 	0x6eb90073,
 	0x0421f402,
 	0xfd02d8b9,
 	0x60f90587,
 	0xd0fc80f9,
 	0x21f4e0fc,
-	0x2067f133,
-	0x026eb916,
-	0xb90421f4,
-	0x87fd02d8,
-	0xf960f905,
-	0xfcd0fc80,
-	0x3321f4e0,
-	0x0aa277f1,
-	0xb90073f0,
-	0x21f4026e,
-	0x02d8b904,
-	0xf90587fd,
-	0xfc80f960,
-	0xf4e0fcd0,
-	0x00f83321,
-/* 0x05c8: memx_func_wait_vblank */
-	0xf80410b6,
-/* 0x05cd: memx_func_wr32 */
-	0x00169800,
-	0xb6011598,
-	0x60f90810,
-	0xd0fc50f9,
-	0x21f4e0fc,
-	0x0242b633,
-	0xf8e91bf4,
-/* 0x05e9: memx_func_wait */
-	0x2c87f000,
-	0x980088cf,
-	0x1d98001e,
-	0x021c9801,
-	0xb6031b98,
-	0x21f41010,
-/* 0x0603: memx_func_delay */
-	0x9800f886,
-	0x10b6001e,
-	0x6721f404,
-/* 0x060e: memx_func_train */
-	0x00f800f8,
-/* 0x0610: memx_exec */
-	0xd0f9e0f9,
-	0xb902c1b9,
-/* 0x061a: memx_exec_next */
-	0x139802b2,
+/* 0x05cb: memx_func_wait_vblank */
+	0xb600f834,
+	0x00f80410,
+/* 0x05d0: memx_func_wr32 */
+	0x98001698,
+	0x10b60115,
+	0xf960f908,
+	0xfcd0fc50,
+	0x3421f4e0,
+	0xf40242b6,
+	0x00f8e91b,
+/* 0x05ec: memx_func_wait */
+	0xcf2c87f0,
+	0x1e980088,
+	0x011d9800,
+	0x98021c98,
+	0x10b6031b,
+	0x8521f410,
+/* 0x0606: memx_func_delay */
+	0x1e9800f8,
 	0x0410b600,
-	0x01f034e7,
-	0x01e033e7,
-	0xf00132b6,
-	0x35980c30,
-	0xb855f9de,
-	0x1ef40612,
-	0xf10b98e4,
-	0xbbf20c98,
-	0xb7f102cb,
-	0xbbcf07c4,
-	0xfcd0fc00,
-	0xe621f5e0,
-/* 0x0653: memx_info */
-	0x7000f802,
-	0x0bf401c6,
-/* 0x0659: memx_info_data */
-	0xccc7f10e,
-	0x00b7f103,
-	0x0b0ef408,
-/* 0x0664: memx_info_train */
-	0x0bccc7f1,
-	0x0100b7f1,
-/* 0x066c: memx_info_send */
-	0x02e621f5,
-/* 0x0672: memx_recv */
-	0xd6b000f8,
-	0x9b0bf401,
-	0xf400d6b0,
-	0x00f8d80b,
-/* 0x0680: memx_init */
-/* 0x0682: perf_recv */
-	0x00f800f8,
-/* 0x0684: perf_init */
-/* 0x0686: i2c_drive_scl */
-	0x36b000f8,
-	0x0e0bf400,
-	0x07e007f1,
-	0xbd0001d0,
-/* 0x0697: i2c_drive_scl_lo */
-	0xf100f804,
-	0xd007e407,
+	0xf86621f4,
+/* 0x0611: memx_func_train */
+/* 0x0613: memx_exec */
+	0xf900f800,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x061d: memx_exec_next */
+	0x00139802,
+	0xe70410b6,
+	0xe701f034,
+	0xb601e033,
+	0x30f00132,
+	0xde35980c,
+	0x12b855f9,
+	0xe41ef406,
+	0x98f10b98,
+	0xcbbbf20c,
+	0xc4b7f102,
+	0x00bbcf07,
+	0xe0fcd0fc,
+	0x02e521f5,
+/* 0x0656: memx_info */
+	0xc67000f8,
+	0x0e0bf401,
+/* 0x065c: memx_info_data */
+	0x03ccc7f1,
+	0x0800b7f1,
+/* 0x0667: memx_info_train */
+	0xf10b0ef4,
+	0xf10bccc7,
+/* 0x066f: memx_info_send */
+	0xf50100b7,
+	0xf802e521,
+/* 0x0675: memx_recv */
+	0x01d6b000,
+	0xb09b0bf4,
+	0x0bf400d6,
+/* 0x0683: memx_init */
+	0xf800f8d8,
+/* 0x0685: perf_recv */
+/* 0x0687: perf_init */
+	0xf800f800,
+/* 0x0689: i2c_drive_scl */
+	0x0036b000,
+	0xf10e0bf4,
+	0xd007e007,
 	0x04bd0001,
-/* 0x06a2: i2c_drive_sda */
-	0x36b000f8,
-	0x0e0bf400,
-	0x07e007f1,
-	0xbd0002d0,
-/* 0x06b3: i2c_drive_sda_lo */
-	0xf100f804,
-	0xd007e407,
+/* 0x069a: i2c_drive_scl_lo */
+	0x07f100f8,
+	0x01d007e4,
+	0xf804bd00,
+/* 0x06a5: i2c_drive_sda */
+	0x0036b000,
+	0xf10e0bf4,
+	0xd007e007,
 	0x04bd0002,
-/* 0x06be: i2c_sense_scl */
+/* 0x06b6: i2c_drive_sda_lo */
+	0x07f100f8,
+	0x02d007e4,
+	0xf804bd00,
+/* 0x06c1: i2c_sense_scl */
+	0x0132f400,
+	0x07c437f1,
+	0xfd0033cf,
+	0x0bf40431,
+	0x0131f406,
+/* 0x06d4: i2c_sense_scl_done */
+/* 0x06d6: i2c_sense_sda */
 	0x32f400f8,
 	0xc437f101,
 	0x0033cf07,
-	0xf40431fd,
+	0xf40432fd,
 	0x31f4060b,
-/* 0x06d1: i2c_sense_scl_done */
-/* 0x06d3: i2c_sense_sda */
-	0xf400f801,
-	0x37f10132,
-	0x33cf07c4,
-	0x0432fd00,
-	0xf4060bf4,
-/* 0x06e6: i2c_sense_sda_done */
-	0x00f80131,
-/* 0x06e8: i2c_raise_scl */
-	0x47f140f9,
-	0x37f00898,
-	0x8621f501,
-/* 0x06f5: i2c_raise_scl_wait */
-	0xe8e7f106,
-	0x6721f403,
-	0x06be21f5,
-	0xb60901f4,
-	0x1bf40142,
-/* 0x0709: i2c_raise_scl_done */
-	0xf840fcef,
-/* 0x070d: i2c_start */
-	0xbe21f500,
-	0x0d11f406,
-	0x06d321f5,
-	0xf40611f4,
-/* 0x071e: i2c_start_rep */
-	0x37f0300e,
-	0x8621f500,
-	0x0137f006,
-	0x06a221f5,
+/* 0x06e9: i2c_sense_sda_done */
+/* 0x06eb: i2c_raise_scl */
+	0xf900f801,
+	0x9847f140,
+	0x0137f008,
+	0x068921f5,
+/* 0x06f8: i2c_raise_scl_wait */
+	0x03e8e7f1,
+	0xf56621f4,
+	0xf406c121,
+	0x42b60901,
+	0xef1bf401,
+/* 0x070c: i2c_raise_scl_done */
+	0x00f840fc,
+/* 0x0710: i2c_start */
+	0x06c121f5,
+	0xf50d11f4,
+	0xf406d621,
+	0x0ef40611,
+/* 0x0721: i2c_start_rep */
+	0x0037f030,
+	0x068921f5,
+	0xf50137f0,
+	0xbb06a521,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x06eb21f5,
+	0xf40464b6,
+/* 0x074e: i2c_start_send */
+	0x37f01f11,
+	0xa521f500,
+	0x88e7f106,
+	0x6621f413,
+	0xf50037f0,
+	0xf1068921,
+	0xf41388e7,
+/* 0x076a: i2c_start_out */
+	0x00f86621,
+/* 0x076c: i2c_stop */
+	0xf50037f0,
+	0xf0068921,
+	0x21f50037,
+	0xe7f106a5,
+	0x21f403e8,
+	0x0137f066,
+	0x068921f5,
+	0x1388e7f1,
+	0xf06621f4,
+	0x21f50137,
+	0xe7f106a5,
+	0x21f41388,
+/* 0x079f: i2c_bitw */
+	0xf500f866,
+	0xf106a521,
+	0xf403e8e7,
+	0x76bb6621,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb606eb21,
+	0x11f40464,
+	0x88e7f118,
+	0x6621f413,
+	0xf50037f0,
+	0xf1068921,
+	0xf41388e7,
+/* 0x07de: i2c_bitw_out */
+	0x00f86621,
+/* 0x07e0: i2c_bitr */
+	0xf50137f0,
+	0xf106a521,
+	0xf403e8e7,
+	0x76bb6621,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb606eb21,
+	0x11f40464,
+	0xd621f51b,
+	0x0037f006,
+	0x068921f5,
+	0x1388e7f1,
+	0xf06621f4,
+	0x31f4013c,
+/* 0x0825: i2c_bitr_done */
+/* 0x0827: i2c_get_byte */
+	0xf000f801,
+	0x47f00057,
+/* 0x082d: i2c_get_byte_next */
+	0x0154b608,
 	0xb60076bb,
 	0x50f90465,
 	0xbb046594,
 	0x50bd0256,
 	0xfc0475fd,
-	0xe821f550,
-	0x0464b606,
-/* 0x074b: i2c_start_send */
-	0xf01f11f4,
-	0x21f50037,
-	0xe7f106a2,
-	0x21f41388,
-	0x0037f067,
-	0x068621f5,
-	0x1388e7f1,
-/* 0x0767: i2c_start_out */
-	0xf86721f4,
-/* 0x0769: i2c_stop */
-	0x0037f000,
-	0x068621f5,
-	0xf50037f0,
-	0xf106a221,
-	0xf403e8e7,
-	0x37f06721,
-	0x8621f501,
-	0x88e7f106,
-	0x6721f413,
-	0xf50137f0,
-	0xf106a221,
-	0xf41388e7,
-	0x00f86721,
-/* 0x079c: i2c_bitw */
-	0x06a221f5,
-	0x03e8e7f1,
-	0xbb6721f4,
+	0xe021f550,
+	0x0464b607,
+	0xfd2b11f4,
+	0x42b60553,
+	0xd81bf401,
+	0xbb0137f0,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x06e821f5,
-	0xf40464b6,
-	0xe7f11811,
-	0x21f41388,
-	0x0037f067,
-	0x068621f5,
-	0x1388e7f1,
-/* 0x07db: i2c_bitw_out */
-	0xf86721f4,
-/* 0x07dd: i2c_bitr */
-	0x0137f000,
-	0x06a221f5,
-	0x03e8e7f1,
-	0xbb6721f4,
+	0x079f21f5,
+/* 0x0877: i2c_get_byte_done */
+	0xf80464b6,
+/* 0x0879: i2c_put_byte */
+	0x0847f000,
+/* 0x087c: i2c_put_byte_next */
+	0xff0142b6,
+	0x76bb3854,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6079f21,
+	0x11f40464,
+	0x0046b034,
+	0xbbd81bf4,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x06e821f5,
+	0x07e021f5,
 	0xf40464b6,
-	0x21f51b11,
-	0x37f006d3,
-	0x8621f500,
-	0x88e7f106,
-	0x6721f413,
-	0xf4013cf0,
-/* 0x0822: i2c_bitr_done */
-	0x00f80131,
-/* 0x0824: i2c_get_byte */
-	0xf00057f0,
-/* 0x082a: i2c_get_byte_next */
-	0x54b60847,
+	0x76bb0f11,
+	0x0136b000,
+	0xf4061bf4,
+/* 0x08d2: i2c_put_byte_done */
+	0x00f80132,
+/* 0x08d4: i2c_addr */
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x1021f550,
+	0x0464b607,
+	0xe72911f4,
+	0xb6012ec3,
+	0x53fd0134,
+	0x0076bb05,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60879,
+/* 0x0919: i2c_addr_done */
+/* 0x091b: i2c_acquire_addr */
+	0xc700f804,
+	0xe4b6f8ce,
+	0x14e0b705,
+/* 0x0927: i2c_acquire */
+	0xf500f8d0,
+	0xf4091b21,
+	0xd9f00421,
+	0x3421f403,
+/* 0x0936: i2c_release */
+	0x21f500f8,
+	0x21f4091b,
+	0x03daf004,
+	0xf83421f4,
+/* 0x0945: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x3a1ff528,
+	0xf413a001,
+	0x0032980c,
+	0x0ccc13a0,
+	0xf4003198,
+	0xd0f90231,
+	0xd0f9e0f9,
+	0x000067f1,
+	0x100063f1,
+	0xbb016792,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x092721f5,
+	0xfc0464b6,
+	0x00d6b0d0,
+	0x00b31bf5,
+	0xbb0057f0,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x08d421f5,
+	0xf50464b6,
+	0xc700d011,
+	0x76bbe0c5,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6087921,
+	0x11f50464,
+	0x57f000ad,
 	0x0076bb01,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b607dd,
-	0x2b11f404,
-	0xb60553fd,
-	0x1bf40142,
-	0x0137f0d8,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x9c21f550,
-	0x0464b607,
-/* 0x0874: i2c_get_byte_done */
-/* 0x0876: i2c_put_byte */
-	0x47f000f8,
-/* 0x0879: i2c_put_byte_next */
-	0x0142b608,
-	0xbb3854ff,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x079c21f5,
-	0xf40464b6,
-	0x46b03411,
-	0xd81bf400,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xdd21f550,
-	0x0464b607,
-	0xbb0f11f4,
-	0x36b00076,
-	0x061bf401,
-/* 0x08cf: i2c_put_byte_done */
-	0xf80132f4,
-/* 0x08d1: i2c_addr */
+	0x64b608d4,
+	0x8a11f504,
 	0x0076bb00,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b6070d,
-	0x2911f404,
-	0x012ec3e7,
-	0xfd0134b6,
-	0x76bb0553,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb6087621,
-/* 0x0916: i2c_addr_done */
-	0x00f80464,
-/* 0x0918: i2c_acquire_addr */
-	0xb6f8cec7,
-	0xe0b705e4,
-	0x00f8d014,
-/* 0x0924: i2c_acquire */
-	0x091821f5,
-	0xf00421f4,
-	0x21f403d9,
-/* 0x0933: i2c_release */
-	0xf500f833,
-	0xf4091821,
-	0xdaf00421,
-	0x3321f403,
-/* 0x0942: i2c_recv */
-	0x32f400f8,
-	0xf8c1c701,
-	0xb00214b6,
-	0x1ff52816,
-	0x13a0013a,
-	0x32980cf4,
-	0xcc13a000,
-	0x0031980c,
-	0xf90231f4,
-	0xf9e0f9d0,
-	0x0067f1d0,
-	0x0063f100,
-	0x01679210,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x2421f550,
-	0x0464b609,
-	0xd6b0d0fc,
-	0xb31bf500,
-	0x0057f000,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xd121f550,
-	0x0464b608,
-	0x00d011f5,
-	0xbbe0c5c7,
+	0x64b60827,
+	0x6a11f404,
+	0xbbe05bcb,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x087621f5,
-	0xf50464b6,
-	0xf000ad11,
-	0x76bb0157,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb608d121,
-	0x11f50464,
-	0x76bb008a,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb6082421,
-	0x11f40464,
-	0xe05bcb6a,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x6921f550,
-	0x0464b607,
-	0xbd025bb9,
-	0x430ef474,
-/* 0x0a48: i2c_recv_not_rd08 */
-	0xf401d6b0,
-	0x57f03d1b,
-	0xd121f500,
-	0x3311f408,
-	0xf5e0c5c7,
-	0xf4087621,
-	0x57f02911,
-	0xd121f500,
-	0x1f11f408,
-	0xf5e0b5c7,
-	0xf4087621,
-	0x21f51511,
-	0x74bd0769,
-	0xf408c5c7,
-	0x32f4091b,
-	0x030ef402,
-/* 0x0a88: i2c_recv_not_wr08 */
-/* 0x0a88: i2c_recv_done */
-	0xf5f8cec7,
-	0xfc093321,
-	0xf4d0fce0,
-	0x7cb90a12,
-	0xe621f502,
-/* 0x0a9d: i2c_recv_exit */
-/* 0x0a9f: i2c_init */
+	0x076c21f5,
+	0xb90464b6,
+	0x74bd025b,
+/* 0x0a4b: i2c_recv_not_rd08 */
+	0xb0430ef4,
+	0x1bf401d6,
+	0x0057f03d,
+	0x08d421f5,
+	0xc73311f4,
+	0x21f5e0c5,
+	0x11f40879,
+	0x0057f029,
+	0x08d421f5,
+	0xc71f11f4,
+	0x21f5e0b5,
+	0x11f40879,
+	0x6c21f515,
+	0xc774bd07,
+	0x1bf408c5,
+	0x0232f409,
+/* 0x0a8b: i2c_recv_not_wr08 */
+/* 0x0a8b: i2c_recv_done */
+	0xc7030ef4,
+	0x21f5f8ce,
+	0xe0fc0936,
+	0x12f4d0fc,
+	0x027cb90a,
+	0x02e521f5,
+/* 0x0aa0: i2c_recv_exit */
+/* 0x0aa2: i2c_init */
+	0x00f800f8,
+/* 0x0aa4: test_recv */
+	0x05d817f1,
+	0xb60011cf,
+	0x07f10110,
+	0x01d005d8,
+	0xf104bd00,
+	0xf1d900e7,
+	0xf5134fe3,
+	0xf8021721,
+/* 0x0ac5: test_init */
+	0x00e7f100,
+	0x1721f508,
+/* 0x0acf: idle_recv */
 	0xf800f802,
-/* 0x0aa1: test_recv */
-	0xd817f100,
-	0x0011cf05,
-	0xf10110b6,
-	0xd005d807,
-	0x04bd0001,
-	0xd900e7f1,
-	0x134fe3f1,
-	0x021821f5,
-/* 0x0ac2: test_init */
-	0xe7f100f8,
-	0x21f50800,
-	0x00f80218,
-/* 0x0acc: idle_recv */
-/* 0x0ace: idle */
-	0x31f400f8,
-	0xd417f100,
-	0x0011cf05,
-	0xf10110b6,
-	0xd005d407,
-	0x04bd0001,
-/* 0x0ae4: idle_loop */
-	0xf45817f0,
-/* 0x0aea: idle_proc */
-/* 0x0aea: idle_proc_exec */
-	0x10f90232,
-	0xf5021eb9,
-	0xfc02ef21,
-	0x0911f410,
-	0xf40231f4,
-/* 0x0afe: idle_proc_next */
-	0x10b6ef0e,
-	0x061fb858,
-	0xf4e61bf4,
-	0x28f4dd02,
-	0xc10ef400,
-	0x00000000,
+/* 0x0ad1: idle */
+	0x0031f400,
+	0x05d417f1,
+	0xb60011cf,
+	0x07f10110,
+	0x01d005d4,
+/* 0x0ae7: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x0aed: idle_proc */
+/* 0x0aed: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc02ee,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x0b01: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00c10ef4,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 8a2b628..3c731ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -24,8 +24,8 @@
 	0x00000000,
 /* 0x0058: proc_list_head */
 	0x54534f48,
-	0x00000447,
-	0x000003f8,
+	0x0000042c,
+	0x000003df,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x00000621,
-	0x00000613,
+	0x000005f3,
+	0x000005e5,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x00000625,
-	0x00000623,
+	0x000005f7,
+	0x000005f5,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x00000a29,
-	0x000008d0,
+	0x000009f8,
+	0x000008a2,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000a4a,
-	0x00000a2b,
+	0x00000a16,
+	0x000009fa,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000a55,
-	0x00000a53,
+	0x00000a21,
+	0x00000a1f,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -229,26 +229,26 @@
 /* 0x0370: memx_func_head */
 	0x00000001,
 	0x00000000,
-	0x00000477,
+	0x0000045c,
 /* 0x037c: memx_func_next */
 	0x00000002,
 	0x00000000,
-	0x000004f4,
+	0x000004cf,
 	0x00000003,
 	0x00000002,
-	0x00000574,
+	0x00000546,
 	0x00040004,
 	0x00000000,
-	0x00000591,
+	0x00000563,
 	0x00010005,
 	0x00000000,
-	0x000005ab,
+	0x0000057d,
 	0x00010006,
 	0x00000000,
-	0x0000056f,
+	0x00000541,
 	0x00000007,
 	0x00000000,
-	0x000005b7,
+	0x00000589,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -916,784 +916,771 @@
 };
 
 uint32_t gk208_pmu_code[] = {
-	0x03100ef5,
+	0x02f90ef5,
 /* 0x0004: rd32 */
 	0xf607a040,
 	0x04bd000e,
-	0xd3f0010d,
+	0x0100018d,
+	0xf607ac40,
+	0x04bd000d,
+/* 0x0018: rd32_wait */
+	0xcf07ac4d,
+	0xd4f100dd,
+	0x1bf47000,
+	0x07a44df6,
+	0xf800ddcf,
+/* 0x002d: wr32 */
+	0x07a04000,
+	0xbd000ef6,
+	0x07a44004,
+	0xbd000df6,
+	0x00f28d04,
 	0x07ac4001,
 	0xbd000df6,
-/* 0x0019: rd32_wait */
+/* 0x0049: wr32_wait */
 	0x07ac4d04,
 	0xf100ddcf,
 	0xf47000d4,
-	0xa44df61b,
-	0x00ddcf07,
-/* 0x002e: wr32 */
-	0xa04000f8,
-	0x000ef607,
-	0xa44004bd,
-	0x000df607,
-	0x020d04bd,
-	0xf0f0d5f0,
-	0xac4001d3,
-	0x000df607,
-/* 0x004e: wr32_wait */
-	0xac4d04bd,
-	0x00ddcf07,
-	0x7000d4f1,
-	0xf8f61bf4,
-/* 0x005d: nsec */
-	0xf990f900,
-	0xcf2c0880,
-/* 0x0066: nsec_loop */
-	0x2c090088,
-	0xbb0099cf,
-	0x9ea60298,
-	0xfcf61ef4,
-	0xf890fc80,
-/* 0x0079: wait */
-	0xf990f900,
-	0xcf2c0880,
-/* 0x0082: wait_loop */
-	0xeeb20088,
-	0x0000047e,
-	0xadfddab2,
-	0xf4aca604,
-	0x2c09100b,
-	0xbb0099cf,
-	0x9ba60298,
-/* 0x009f: wait_done */
-	0xfce61ef4,
-	0xf890fc80,
-/* 0x00a5: intr_watchdog */
-	0x03e99800,
-	0xf40096b0,
-	0x0a98280b,
-	0x029abb9a,
-	0x0d0e1cf4,
-	0x02557e01,
-	0xf494bd00,
-/* 0x00c2: intr_watchdog_next_time */
-	0x0a98140e,
-	0x00a6b09b,
-	0xa6080bf4,
-	0x061cf49a,
-/* 0x00d0: intr_watchdog_next_time_set */
-/* 0x00d3: intr_watchdog_next_proc */
-	0xb59b09b5,
-	0xe0b603e9,
-	0x68e6b158,
-	0xc81bf402,
-/* 0x00e2: intr */
-	0x00f900f8,
-	0x80f904bd,
-	0xa0f990f9,
-	0xc0f9b0f9,
-	0xe0f9d0f9,
-	0x000ff0f9,
-	0xf90188fe,
-	0x04504880,
-	0xb60088cf,
-	0x50400180,
-	0x0008f604,
-	0x080804bd,
-	0xc40088cf,
-	0x0bf40289,
-	0x9b00b51f,
-	0xa57e580e,
-	0x09980000,
-	0x0096b09b,
-	0x000d0bf4,
-	0x0009f634,
-	0x09b504bd,
-/* 0x0135: intr_skip_watchdog */
-	0x0089e49a,
-	0x360bf408,
-	0xcf068849,
-	0x9ac40099,
-	0x220bf402,
-	0xcf04c04c,
-	0xc0f900cc,
-	0xf14f484e,
-	0x0d5453e3,
-	0x02b67e00,
-	0x40c0fc00,
-	0x0cf604c0,
-/* 0x0167: intr_subintr_skip_fifo */
-	0x4004bd00,
-	0x09f60688,
-/* 0x016f: intr_skip_subintr */
-	0x4904bd00,
-	0x90bd00e0,
-	0x000489fd,
-	0x0008f604,
-	0x80fc04bd,
-	0xfc0088fe,
-	0xfce0fcf0,
-	0xfcc0fcd0,
-	0xfca0fcb0,
-	0xfc80fc90,
-	0x0032f400,
-/* 0x019a: ticks_from_ns */
-	0xc0f901f8,
-	0xd7f1b0f9,
-	0xd3f00144,
-	0x6b21f500,
-	0xe8ccec03,
-	0x00b4b003,
-	0xec120bf4,
-	0xf103e8ee,
-	0xf00144d7,
-	0x21f500d3,
-/* 0x01c2: ticks_from_ns_quit */
-	0xceb2036b,
-	0xc0fcb0fc,
-/* 0x01ca: ticks_from_us */
-	0xc0f900f8,
-	0xd7f1b0f9,
-	0xd3f00144,
-	0x6b21f500,
-	0xb0ceb203,
-	0x0bf400b4,
-/* 0x01e3: ticks_from_us_quit */
-	0xfce4bd05,
-	0xf8c0fcb0,
-/* 0x01e9: ticks_to_us */
-	0x44d7f100,
-	0x00d3f001,
-	0xf8ecedff,
-/* 0x01f5: timer */
-	0xf990f900,
-	0x1032f480,
-	0xb003f898,
-	0x1cf40086,
-	0x0084bd4a,
-	0x0008f638,
-	0x340804bd,
-	0x980088cf,
-	0x98bb9a09,
-	0x00e9bb02,
-	0x0803feb5,
-	0x0088cf08,
-	0xf40284f0,
-	0x34081c1b,
-	0xa60088cf,
-	0x080bf4e0,
-	0x1cf4e8a6,
-/* 0x0239: timer_reset */
-	0xf634000d,
-	0x04bd000e,
-/* 0x0243: timer_enable */
-	0x089a0eb5,
-	0xf6380001,
-	0x04bd0008,
-/* 0x024c: timer_done */
-	0xfc1031f4,
-	0xf890fc80,
-/* 0x0255: send_proc */
-	0xf980f900,
-	0x05e89890,
-	0xf004e998,
-	0x89a60486,
-	0xc42a0bf4,
-	0x88940398,
-	0x1880b604,
-	0x98008ebb,
-	0x8ab500fa,
-	0x018db500,
-	0xb5028cb5,
-	0x90b6038b,
-	0x0794f001,
-	0xf404e9b5,
-/* 0x028e: send_done */
-	0x90fc0231,
-	0x00f880fc,
-/* 0x0294: find */
-	0x580880f9,
-/* 0x029b: find_loop */
-	0x980131f4,
-	0xaea6008a,
-	0xb6100bf4,
-	0x86b15880,
-	0x1bf40268,
-	0x0132f4f1,
-/* 0x02b0: find_done */
-	0x80fc8eb2,
-/* 0x02b6: send */
-	0x947e00f8,
-	0x01f40002,
-/* 0x02bf: recv */
-	0xf900f89b,
-	0x9880f990,
-	0xe99805e8,
-	0x0132f404,
-	0x0bf489a6,
-	0x0389c43c,
-	0xf00180b6,
-	0xe8b50784,
-	0x02ea9805,
-	0x8ffef0f9,
-	0xb2f0f901,
-	0x049994ef,
-	0xb600e9bb,
-	0xeb9818e0,
-	0x02ec9803,
-	0x9801ed98,
-	0xa5f900ee,
-	0xf8fef0fc,
-	0x0131f400,
-/* 0x030a: recv_done */
-	0x80fcf0fc,
+	0x00f8f61b,
+/* 0x0058: nsec */
+	0x80f990f9,
+	0x88cf2c08,
+/* 0x0061: nsec_loop */
+	0xcf2c0900,
+	0x98bb0099,
+	0xf49ea602,
+	0x80fcf61e,
 	0x00f890fc,
-/* 0x0310: init */
-	0xcf010841,
-	0x11e70011,
-	0x14b60109,
-	0x0014fe08,
-	0xf000e041,
-	0x1c000013,
-	0xbd0001f6,
-	0x00ff0104,
-	0x0001f614,
-	0x020104bd,
-	0x080015f1,
-	0x01f61000,
-	0x4104bd00,
-	0x13f000e2,
-	0x0010fe00,
-	0x011031f4,
-	0xf6380001,
+/* 0x0074: wait */
+	0x80f990f9,
+	0x88cf2c08,
+/* 0x007d: wait_loop */
+	0x7eeeb200,
+	0xb2000004,
+	0x04adfdda,
+	0x0bf4aca6,
+	0xcf2c0910,
+	0x98bb0099,
+	0xf49ba602,
+/* 0x009a: wait_done */
+	0x80fce61e,
+	0x00f890fc,
+/* 0x00a0: intr_watchdog */
+	0xb003e998,
+	0x0bf40096,
+	0x9a0a9828,
+	0xf4029abb,
+	0x010d0e1c,
+	0x00023e7e,
+	0x0ef494bd,
+/* 0x00bd: intr_watchdog_next_time */
+	0x9b0a9814,
+	0xf400a6b0,
+	0x9aa6080b,
+/* 0x00cb: intr_watchdog_next_time_set */
+	0xb5061cf4,
+/* 0x00ce: intr_watchdog_next_proc */
+	0xe9b59b09,
+	0x58e0b603,
+	0x0268e6b1,
+	0xf8c81bf4,
+/* 0x00dd: intr */
+	0xbd00f900,
+	0xf980f904,
+	0xf9a0f990,
+	0xf9c0f9b0,
+	0xf9e0f9d0,
+	0xfe000ff0,
+	0x80f90188,
+	0xcf045048,
+	0x80b60088,
+	0x04504001,
+	0xbd0008f6,
+	0xcf080804,
+	0x89c40088,
+	0x1f0bf402,
+	0x0e9b00b5,
+	0x00a07e58,
+	0x9b099800,
+	0xf40096b0,
+	0x34000d0b,
+	0xbd0009f6,
+	0x9a09b504,
+/* 0x0130: intr_skip_watchdog */
+	0x080089e4,
+	0x49340bf4,
+	0x99cf0688,
+	0x029ac400,
+	0x4c200bf4,
+	0xcccf04c0,
+	0xdec0f900,
+	0x54534f48,
+	0x9f7e000d,
+	0xc0fc0002,
+	0xf604c040,
+	0x04bd000c,
+/* 0x0160: intr_subintr_skip_fifo */
+	0xf6068840,
+	0x04bd0009,
+/* 0x0168: intr_skip_subintr */
+	0xbd00e049,
+	0x0489fd90,
+	0x08f60400,
+	0xfc04bd00,
+	0x0088fe80,
+	0xe0fcf0fc,
+	0xc0fcd0fc,
+	0xa0fcb0fc,
+	0x80fc90fc,
+	0x32f400fc,
+/* 0x0193: ticks_from_ns */
+	0xf901f800,
+	0x4db0f9c0,
+	0x527e0144,
+	0xccec0003,
+	0xb4b003e8,
+	0x0e0bf400,
+	0x03e8eeec,
+	0x7e01444d,
+/* 0x01b3: ticks_from_ns_quit */
+	0xb2000352,
+	0xfcb0fcce,
+/* 0x01bb: ticks_from_us */
+	0xf900f8c0,
+	0x4db0f9c0,
+	0x527e0144,
+	0xceb20003,
+	0xf400b4b0,
+	0xe4bd050b,
+/* 0x01d0: ticks_from_us_quit */
+	0xc0fcb0fc,
+/* 0x01d6: ticks_to_us */
+	0x444d00f8,
+	0xecedff01,
+/* 0x01de: timer */
+	0x90f900f8,
+	0x32f480f9,
+	0x03f89810,
+	0xf40086b0,
+	0x84bd4a1c,
+	0x08f63800,
+	0x0804bd00,
+	0x0088cf34,
+	0xbb9a0998,
+	0xe9bb0298,
+	0x03feb500,
+	0x88cf0808,
+	0x0284f000,
+	0x081c1bf4,
+	0x0088cf34,
+	0x0bf4e0a6,
+	0xf4e8a608,
+/* 0x0222: timer_reset */
+	0x34000d1c,
+	0xbd000ef6,
+	0x9a0eb504,
+/* 0x022c: timer_enable */
+	0x38000108,
+	0xbd0008f6,
+/* 0x0235: timer_done */
+	0x1031f404,
+	0x90fc80fc,
+/* 0x023e: send_proc */
+	0x80f900f8,
+	0xe89890f9,
+	0x04e99805,
+	0xa60486f0,
+	0x2a0bf489,
+	0x940398c4,
+	0x80b60488,
+	0x008ebb18,
+	0xb500fa98,
+	0x8db5008a,
+	0x028cb501,
+	0xb6038bb5,
+	0x94f00190,
+	0x04e9b507,
+/* 0x0277: send_done */
+	0xfc0231f4,
+	0xf880fc90,
+/* 0x027d: find */
+	0x0880f900,
+	0x0131f458,
+/* 0x0284: find_loop */
+	0xa6008a98,
+	0x100bf4ae,
+	0xb15880b6,
+	0xf4026886,
+	0x32f4f11b,
+/* 0x0299: find_done */
+	0xfc8eb201,
+/* 0x029f: send */
+	0x7e00f880,
+	0xf400027d,
+	0x00f89b01,
+/* 0x02a8: recv */
+	0x80f990f9,
+	0x9805e898,
+	0x32f404e9,
+	0xf489a601,
+	0x89c43c0b,
+	0x0180b603,
+	0xb50784f0,
+	0xea9805e8,
+	0xfef0f902,
+	0xf0f9018f,
+	0x9994efb2,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x02f3: recv_done */
+	0xfcf0fc01,
+	0xf890fc80,
+/* 0x02f9: init */
+	0x01084100,
+	0xe70011cf,
+	0xb6010911,
+	0x14fe0814,
+	0x00e04100,
+	0x01f61c00,
+	0x0104bd00,
+	0xf61400ff,
 	0x04bd0001,
-/* 0x035a: init_proc */
-	0xf198580f,
-	0x0016b001,
-	0xf9fa0bf4,
-	0x58f0b615,
-/* 0x036b: mulu32_32_64 */
-	0xf9f20ef4,
-	0xf920f910,
-	0x9540f930,
-	0xd29510e1,
-	0xbdc4bd10,
-	0xc0edffb4,
-	0xb2301dff,
+	0x15f10201,
+	0x10000800,
+	0xbd0001f6,
+	0x00dd4104,
+	0xffff14f1,
+	0xf40010fe,
+	0x01011031,
+	0x01f63800,
+	0x0f04bd00,
+/* 0x0341: init_proc */
+	0x01f19858,
+	0xf40016b0,
+	0x15f9fa0b,
+	0xf458f0b6,
+/* 0x0352: mulu32_32_64 */
+	0x10f9f20e,
+	0x30f920f9,
+	0xe19540f9,
+	0x10d29510,
+	0xb4bdc4bd,
+	0xffc0edff,
+	0x34b2301d,
+	0xffff34f1,
+	0xb61034b6,
+	0xc3bb1045,
+	0x01b4bb00,
+	0xb230e2ff,
 	0xff34f134,
 	0x1034b6ff,
 	0xbb1045b6,
 	0xb4bb00c3,
-	0x30e2ff01,
-	0x34f134b2,
-	0x34b6ffff,
-	0x1045b610,
-	0xbb00c3bb,
-	0x12ff01b4,
-	0x00b3bb30,
-	0x30fc40fc,
-	0x10fc20fc,
-/* 0x03ba: host_send */
-	0xb04100f8,
-	0x0011cf04,
-	0xcf04a042,
-	0x12a60022,
-	0xc42e0bf4,
-	0xee94071e,
-	0x70e0b704,
-	0x03eb9802,
-	0x9802ec98,
-	0xee9801ed,
-	0x02b67e00,
-	0x0110b600,
-	0x400f1ec4,
-	0x0ef604b0,
-	0xf404bd00,
-/* 0x03f6: host_send_done */
-	0x00f8c70e,
-/* 0x03f8: host_recv */
-	0xf14e4941,
-	0xa6525413,
-	0xb90bf4e1,
-/* 0x0404: host_recv_wait */
-	0xcf04cc41,
-	0xc8420011,
-	0x0022cf04,
-	0xa60816f0,
-	0xef0bf412,
-	0xb60723c4,
-	0x30b70434,
-	0x3bb502f0,
-	0x023cb503,
-	0xb5013db5,
-	0x20b6003e,
-	0x0f24f001,
-	0xf604c840,
-	0x04bd0002,
-	0x00004002,
+	0x3012ff01,
+	0xfc00b3bb,
+	0xfc30fc40,
+	0xf810fc20,
+/* 0x03a1: host_send */
+	0x04b04100,
+	0x420011cf,
+	0x22cf04a0,
+	0xf412a600,
+	0x1ec42e0b,
+	0x04ee9407,
+	0x0270e0b7,
+	0x9803eb98,
+	0xed9802ec,
+	0x00ee9801,
+	0x00029f7e,
+	0xc40110b6,
+	0xb0400f1e,
+	0x000ef604,
+	0x0ef404bd,
+/* 0x03dd: host_send_done */
+/* 0x03df: host_recv */
+	0xd100f8c7,
+	0x52544e49,
+	0x0bf4e1a6,
+/* 0x03e9: host_recv_wait */
+	0x04cc41bb,
+	0x420011cf,
+	0x22cf04c8,
+	0x0816f000,
+	0x0bf412a6,
+	0x0723c4ef,
+	0xb70434b6,
+	0xb502f030,
+	0x3cb5033b,
+	0x013db502,
+	0xb6003eb5,
+	0x24f00120,
+	0x04c8400f,
 	0xbd0002f6,
-/* 0x0447: host_init */
-	0x4100f804,
-	0x14b60080,
-	0x7015f110,
-	0x04d04002,
-	0xbd0001f6,
-	0x00804104,
-	0xf11014b6,
-	0x4002f015,
-	0x01f604dc,
-	0x0104bd00,
-	0x04c44001,
-	0xbd0001f6,
-/* 0x0477: memx_func_enter */
-	0xf100f804,
-	0xf1162067,
-	0xf1f55d77,
-	0xb2ffff73,
-	0x00047e6e,
-	0xfdd8b200,
-	0x60f90487,
-	0xd0fc80f9,
-	0x2e7ee0fc,
-	0x77f10000,
-	0x73f1fffe,
-	0x6eb2ffff,
-	0x0000047e,
-	0x87fdd8b2,
-	0xf960f904,
-	0xfcd0fc80,
-	0x002e7ee0,
-	0xf067f100,
-	0x7e6eb226,
+	0x00400204,
+	0x0002f600,
+	0x00f804bd,
+/* 0x042c: host_init */
+	0xb6008041,
+	0x15f11014,
+	0xd0400270,
+	0x0001f604,
+	0x804104bd,
+	0x1014b600,
+	0x02f015f1,
+	0xf604dc40,
+	0x04bd0001,
+	0xc4400101,
+	0x0001f604,
+	0x00f804bd,
+/* 0x045c: memx_func_enter */
+	0x162067f1,
+	0xf55d77f1,
+	0x047e6eb2,
+	0xd8b20000,
+	0xf90487fd,
+	0xfc80f960,
+	0x7ee0fcd0,
+	0x0700002d,
+	0x7e6eb2fe,
 	0xb2000004,
 	0x0487fdd8,
 	0x80f960f9,
 	0xe0fcd0fc,
-	0x00002e7e,
-	0xe0400406,
-	0x0006f607,
-/* 0x04de: memx_func_enter_wait */
-	0xc04604bd,
-	0x0066cf07,
-	0xf40464f0,
-	0x2c06f70b,
-	0xb50066cf,
-	0x00f8f106,
-/* 0x04f4: memx_func_leave */
-	0x66cf2c06,
-	0xf206b500,
-	0xe4400406,
-	0x0006f607,
-/* 0x0506: memx_func_leave_wait */
-	0xc04604bd,
-	0x0066cf07,
-	0xf40464f0,
-	0x67f1f71b,
-	0x77f126f0,
-	0x73f00001,
-	0x7e6eb200,
-	0xb2000004,
-	0x0587fdd8,
-	0x80f960f9,
-	0xe0fcd0fc,
-	0x00002e7e,
-	0x162067f1,
+	0x00002d7e,
+	0x26f067f1,
+	0x047e6eb2,
+	0xd8b20000,
+	0xf90487fd,
+	0xfc80f960,
+	0x7ee0fcd0,
+	0x0600002d,
+	0x07e04004,
+	0xbd0006f6,
+/* 0x04b9: memx_func_enter_wait */
+	0x07c04604,
+	0xf00066cf,
+	0x0bf40464,
+	0xcf2c06f7,
+	0x06b50066,
+/* 0x04cf: memx_func_leave */
+	0x0600f8f1,
+	0x0066cf2c,
+	0x06f206b5,
+	0x07e44004,
+	0xbd0006f6,
+/* 0x04e1: memx_func_leave_wait */
+	0x07c04604,
+	0xf00066cf,
+	0x1bf40464,
+	0xf067f1f7,
+	0xb2010726,
+	0x00047e6e,
+	0xfdd8b200,
+	0x60f90587,
+	0xd0fc80f9,
+	0x2d7ee0fc,
+	0x67f10000,
+	0x6eb21620,
+	0x0000047e,
+	0x87fdd8b2,
+	0xf960f905,
+	0xfcd0fc80,
+	0x002d7ee0,
+	0x0aa24700,
 	0x047e6eb2,
 	0xd8b20000,
 	0xf90587fd,
 	0xfc80f960,
 	0x7ee0fcd0,
-	0xf100002e,
-	0xf00aa277,
-	0x6eb20073,
-	0x0000047e,
-	0x87fdd8b2,
-	0xf960f905,
-	0xfcd0fc80,
-	0x002e7ee0,
-/* 0x056f: memx_func_wait_vblank */
-	0xb600f800,
-	0x00f80410,
-/* 0x0574: memx_func_wr32 */
-	0x98001698,
-	0x10b60115,
-	0xf960f908,
-	0xfcd0fc50,
-	0x002e7ee0,
-	0x0242b600,
-	0xf8e81bf4,
-/* 0x0591: memx_func_wait */
-	0xcf2c0800,
-	0x1e980088,
-	0x011d9800,
-	0x98021c98,
-	0x10b6031b,
-	0x00797e10,
-/* 0x05ab: memx_func_delay */
-	0x9800f800,
-	0x10b6001e,
-	0x005d7e04,
-/* 0x05b7: memx_func_train */
+	0xf800002d,
+/* 0x0541: memx_func_wait_vblank */
+	0x0410b600,
+/* 0x0546: memx_func_wr32 */
+	0x169800f8,
+	0x01159800,
+	0xf90810b6,
+	0xfc50f960,
+	0x7ee0fcd0,
+	0xb600002d,
+	0x1bf40242,
+/* 0x0563: memx_func_wait */
+	0x0800f8e8,
+	0x0088cf2c,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0x7e1010b6,
+	0xf8000074,
+/* 0x057d: memx_func_delay */
+	0x001e9800,
+	0x7e0410b6,
+	0xf8000058,
+/* 0x0589: memx_func_train */
+/* 0x058b: memx_exec */
+	0xf900f800,
+	0xb2d0f9e0,
+/* 0x0593: memx_exec_next */
+	0x98b2b2c1,
+	0x10b60013,
+	0xf034e704,
+	0xe033e701,
+	0x0132b601,
+	0x980c30f0,
+	0x55f9de35,
+	0x1ef412a6,
+	0xf10b98e5,
+	0xbbf20c98,
+	0xc44b02cb,
+	0x00bbcf07,
+	0xe0fcd0fc,
+	0x00029f7e,
+/* 0x05ca: memx_info */
+	0xc67000f8,
+	0x0c0bf401,
+/* 0x05d0: memx_info_data */
+	0x4b03cc4c,
+	0x0ef40800,
+/* 0x05d9: memx_info_train */
+	0x0bcc4c09,
+/* 0x05df: memx_info_send */
+	0x7e01004b,
+	0xf800029f,
+/* 0x05e5: memx_recv */
+	0x01d6b000,
+	0xb0a30bf4,
+	0x0bf400d6,
+/* 0x05f3: memx_init */
+	0xf800f8dc,
+/* 0x05f5: perf_recv */
+/* 0x05f7: perf_init */
 	0xf800f800,
-/* 0x05b9: memx_exec */
-	0xf9e0f900,
-	0xb2c1b2d0,
-/* 0x05c1: memx_exec_next */
-	0x001398b2,
-	0xe70410b6,
-	0xe701f034,
-	0xb601e033,
-	0x30f00132,
-	0xde35980c,
-	0x12a655f9,
-	0x98e51ef4,
-	0x0c98f10b,
-	0x02cbbbf2,
-	0xcf07c44b,
-	0xd0fc00bb,
-	0xb67ee0fc,
-	0x00f80002,
-/* 0x05f8: memx_info */
-	0xf401c670,
-/* 0x05fe: memx_info_data */
-	0xcc4c0c0b,
-	0x08004b03,
-/* 0x0607: memx_info_train */
-	0x4c090ef4,
-	0x004b0bcc,
-/* 0x060d: memx_info_send */
-	0x02b67e01,
-/* 0x0613: memx_recv */
-	0xb000f800,
-	0x0bf401d6,
-	0x00d6b0a3,
-	0xf8dc0bf4,
-/* 0x0621: memx_init */
-/* 0x0623: perf_recv */
-	0xf800f800,
-/* 0x0625: perf_init */
-/* 0x0627: i2c_drive_scl */
-	0xb000f800,
-	0x0bf40036,
-	0x07e0400d,
-	0xbd0001f6,
-/* 0x0637: i2c_drive_scl_lo */
-	0x4000f804,
-	0x01f607e4,
-	0xf804bd00,
-/* 0x0641: i2c_drive_sda */
+/* 0x05f9: i2c_drive_scl */
 	0x0036b000,
 	0x400d0bf4,
-	0x02f607e0,
+	0x01f607e0,
 	0xf804bd00,
-/* 0x0651: i2c_drive_sda_lo */
+/* 0x0609: i2c_drive_scl_lo */
 	0x07e44000,
+	0xbd0001f6,
+/* 0x0613: i2c_drive_sda */
+	0xb000f804,
+	0x0bf40036,
+	0x07e0400d,
 	0xbd0002f6,
-/* 0x065b: i2c_sense_scl */
-	0xf400f804,
-	0xc4430132,
-	0x0033cf07,
-	0xf40431fd,
-	0x31f4060b,
-/* 0x066d: i2c_sense_scl_done */
-/* 0x066f: i2c_sense_sda */
-	0xf400f801,
-	0xc4430132,
-	0x0033cf07,
-	0xf40432fd,
-	0x31f4060b,
-/* 0x0681: i2c_sense_sda_done */
-/* 0x0683: i2c_raise_scl */
-	0xf900f801,
-	0x08984440,
-	0x277e0103,
-/* 0x068e: i2c_raise_scl_wait */
-	0xe84e0006,
-	0x005d7e03,
-	0x065b7e00,
-	0x0901f400,
-	0xf40142b6,
-/* 0x06a2: i2c_raise_scl_done */
-	0x40fcef1b,
-/* 0x06a6: i2c_start */
-	0x5b7e00f8,
-	0x11f40006,
-	0x066f7e0d,
-	0x0611f400,
-/* 0x06b7: i2c_start_rep */
-	0x032e0ef4,
-	0x06277e00,
-	0x7e010300,
-	0xbb000641,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x0006837e,
-	0xf40464b6,
-/* 0x06e2: i2c_start_send */
-	0x00031d11,
-	0x0006417e,
-	0x7e13884e,
-	0x0300005d,
-	0x06277e00,
+/* 0x0623: i2c_drive_sda_lo */
+	0x4000f804,
+	0x02f607e4,
+	0xf804bd00,
+/* 0x062d: i2c_sense_scl */
+	0x0132f400,
+	0xcf07c443,
+	0x31fd0033,
+	0x060bf404,
+/* 0x063f: i2c_sense_scl_done */
+	0xf80131f4,
+/* 0x0641: i2c_sense_sda */
+	0x0132f400,
+	0xcf07c443,
+	0x32fd0033,
+	0x060bf404,
+/* 0x0653: i2c_sense_sda_done */
+	0xf80131f4,
+/* 0x0655: i2c_raise_scl */
+	0x4440f900,
+	0x01030898,
+	0x0005f97e,
+/* 0x0660: i2c_raise_scl_wait */
+	0x7e03e84e,
+	0x7e000058,
+	0xf400062d,
+	0x42b60901,
+	0xef1bf401,
+/* 0x0674: i2c_raise_scl_done */
+	0x00f840fc,
+/* 0x0678: i2c_start */
+	0x00062d7e,
+	0x7e0d11f4,
+	0xf4000641,
+	0x0ef40611,
+/* 0x0689: i2c_start_rep */
+	0x7e00032e,
+	0x030005f9,
+	0x06137e01,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x557e50fc,
+	0x64b60006,
+	0x1d11f404,
+/* 0x06b4: i2c_start_send */
+	0x137e0003,
+	0x884e0006,
+	0x00587e13,
+	0x7e000300,
+	0x4e0005f9,
+	0x587e1388,
+/* 0x06ce: i2c_start_out */
+	0x00f80000,
+/* 0x06d0: i2c_stop */
+	0xf97e0003,
+	0x00030005,
+	0x0006137e,
+	0x7e03e84e,
+	0x03000058,
+	0x05f97e01,
 	0x13884e00,
-	0x00005d7e,
-/* 0x06fc: i2c_start_out */
-/* 0x06fe: i2c_stop */
-	0x000300f8,
-	0x0006277e,
-	0x417e0003,
-	0xe84e0006,
-	0x005d7e03,
-	0x7e010300,
-	0x4e000627,
-	0x5d7e1388,
-	0x01030000,
-	0x0006417e,
-	0x7e13884e,
-	0xf800005d,
-/* 0x072d: i2c_bitw */
-	0x06417e00,
-	0x03e84e00,
-	0x00005d7e,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x06837e50,
-	0x0464b600,
-	0x4e1711f4,
-	0x5d7e1388,
-	0x00030000,
-	0x0006277e,
-	0x7e13884e,
-/* 0x076b: i2c_bitw_out */
-	0xf800005d,
-/* 0x076d: i2c_bitr */
-	0x7e010300,
-	0x4e000641,
-	0x5d7e03e8,
+	0x0000587e,
+	0x137e0103,
+	0x884e0006,
+	0x00587e13,
+/* 0x06ff: i2c_bitw */
+	0x7e00f800,
+	0x4e000613,
+	0x587e03e8,
 	0x76bb0000,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0x7e50fc04,
-	0xb6000683,
+	0xb6000655,
 	0x11f40464,
-	0x066f7e1a,
-	0x7e000300,
-	0x4e000627,
-	0x5d7e1388,
-	0x3cf00000,
-	0x0131f401,
-/* 0x07b0: i2c_bitr_done */
-/* 0x07b2: i2c_get_byte */
-	0x000500f8,
-/* 0x07b6: i2c_get_byte_next */
-	0x54b60804,
+	0x13884e17,
+	0x0000587e,
+	0xf97e0003,
+	0x884e0005,
+	0x00587e13,
+/* 0x073d: i2c_bitw_out */
+/* 0x073f: i2c_bitr */
+	0x0300f800,
+	0x06137e01,
+	0x03e84e00,
+	0x0000587e,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x06557e50,
+	0x0464b600,
+	0x7e1a11f4,
+	0x03000641,
+	0x05f97e00,
+	0x13884e00,
+	0x0000587e,
+	0xf4013cf0,
+/* 0x0782: i2c_bitr_done */
+	0x00f80131,
+/* 0x0784: i2c_get_byte */
+	0x08040005,
+/* 0x0788: i2c_get_byte_next */
+	0xbb0154b6,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x00073f7e,
+	0xf40464b6,
+	0x53fd2a11,
+	0x0142b605,
+	0x03d81bf4,
 	0x0076bb01,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
-	0x6d7e50fc,
-	0x64b60007,
-	0x2a11f404,
-	0xb60553fd,
-	0x1bf40142,
-	0xbb0103d8,
+	0xff7e50fc,
+	0x64b60006,
+/* 0x07d1: i2c_get_byte_done */
+/* 0x07d3: i2c_put_byte */
+	0x0400f804,
+/* 0x07d5: i2c_put_byte_next */
+	0x0142b608,
+	0xbb3854ff,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x00072d7e,
-/* 0x07ff: i2c_get_byte_done */
-	0xf80464b6,
-/* 0x0801: i2c_put_byte */
-/* 0x0803: i2c_put_byte_next */
-	0xb6080400,
-	0x54ff0142,
-	0x0076bb38,
+	0x0006ff7e,
+	0xf40464b6,
+	0x46b03411,
+	0xd81bf400,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x073f7e50,
+	0x0464b600,
+	0xbb0f11f4,
+	0x36b00076,
+	0x061bf401,
+/* 0x082b: i2c_put_byte_done */
+	0xf80132f4,
+/* 0x082d: i2c_addr */
+	0x0076bb00,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
-	0x2d7e50fc,
-	0x64b60007,
-	0x3411f404,
-	0xf40046b0,
-	0x76bbd81b,
+	0x787e50fc,
+	0x64b60006,
+	0x2911f404,
+	0x012ec3e7,
+	0xfd0134b6,
+	0x76bb0553,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0x7e50fc04,
-	0xb600076d,
-	0x11f40464,
-	0x0076bb0f,
-	0xf40136b0,
-	0x32f4061b,
-/* 0x0859: i2c_put_byte_done */
-/* 0x085b: i2c_addr */
-	0xbb00f801,
+	0xb60007d3,
+/* 0x0872: i2c_addr_done */
+	0x00f80464,
+/* 0x0874: i2c_acquire_addr */
+	0xb6f8cec7,
+	0xe0b705e4,
+	0x00f8d014,
+/* 0x0880: i2c_acquire */
+	0x0008747e,
+	0x0000047e,
+	0x7e03d9f0,
+	0xf800002d,
+/* 0x0891: i2c_release */
+	0x08747e00,
+	0x00047e00,
+	0x03daf000,
+	0x00002d7e,
+/* 0x08a2: i2c_recv */
+	0x32f400f8,
+	0xf8c1c701,
+	0xb00214b6,
+	0x1ff52816,
+	0x13b80134,
+	0x98000cf4,
+	0x13b80032,
+	0x98000ccc,
+	0x31f40031,
+	0xf9d0f902,
+	0xd6d0f9e0,
+	0x10000000,
+	0xbb016792,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0006a67e,
-	0xf40464b6,
-	0xc3e72911,
-	0x34b6012e,
-	0x0553fd01,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x08017e50,
-	0x0464b600,
-/* 0x08a0: i2c_addr_done */
-/* 0x08a2: i2c_acquire_addr */
-	0xcec700f8,
-	0x05e4b6f8,
-	0xd014e0b7,
-/* 0x08ae: i2c_acquire */
-	0xa27e00f8,
-	0x047e0008,
-	0xd9f00000,
-	0x002e7e03,
-/* 0x08bf: i2c_release */
-	0x7e00f800,
-	0x7e0008a2,
-	0xf0000004,
-	0x2e7e03da,
-	0x00f80000,
-/* 0x08d0: i2c_recv */
-	0xc70132f4,
-	0x14b6f8c1,
-	0x2816b002,
-	0x01371ff5,
-	0x0cf413b8,
-	0x00329800,
-	0x0ccc13b8,
-	0x00319800,
-	0xf90231f4,
-	0xf9e0f9d0,
-	0x0067f1d0,
-	0x0063f100,
-	0x01679210,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x08ae7e50,
-	0x0464b600,
-	0xd6b0d0fc,
-	0xb01bf500,
-	0xbb000500,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x00085b7e,
-	0xf50464b6,
-	0xc700cc11,
-	0x76bbe0c5,
+	0x0008807e,
+	0xfc0464b6,
+	0x00d6b0d0,
+	0x00b01bf5,
+	0x76bb0005,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0x7e50fc04,
-	0xb6000801,
+	0xb600082d,
 	0x11f50464,
-	0x010500a9,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x085b7e50,
-	0x0464b600,
-	0x008711f5,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x07b27e50,
-	0x0464b600,
-	0xcb6711f4,
-	0x76bbe05b,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0x7e50fc04,
-	0xb60006fe,
-	0x5bb20464,
-	0x0ef474bd,
-/* 0x09d5: i2c_recv_not_rd08 */
-	0x01d6b041,
-	0x053b1bf4,
-	0x085b7e00,
-	0x3211f400,
-	0x7ee0c5c7,
-	0xf4000801,
-	0x00052811,
-	0x00085b7e,
-	0xc71f11f4,
-	0x017ee0b5,
-	0x11f40008,
-	0x06fe7e15,
-	0xc774bd00,
-	0x1bf408c5,
-	0x0232f409,
-/* 0x0a13: i2c_recv_not_wr08 */
-/* 0x0a13: i2c_recv_done */
-	0xc7030ef4,
-	0xbf7ef8ce,
-	0xe0fc0008,
-	0x12f4d0fc,
-	0x7e7cb209,
-/* 0x0a27: i2c_recv_exit */
-	0xf80002b6,
-/* 0x0a29: i2c_init */
-/* 0x0a2b: test_recv */
-	0x4100f800,
-	0x11cf0458,
-	0x0110b600,
-	0xf6045840,
-	0x04bd0001,
-	0xd900e7f1,
-	0x134fe3f1,
-	0x0001f57e,
-/* 0x0a4a: test_init */
+	0xc5c700cc,
+	0x0076bbe0,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0xd37e50fc,
+	0x64b60007,
+	0xa911f504,
+	0xbb010500,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x00082d7e,
+	0xf50464b6,
+	0xbb008711,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x0007847e,
+	0xf40464b6,
+	0x5bcb6711,
+	0x0076bbe0,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0xd07e50fc,
+	0x64b60006,
+	0xbd5bb204,
+	0x410ef474,
+/* 0x09a4: i2c_recv_not_rd08 */
+	0xf401d6b0,
+	0x00053b1b,
+	0x00082d7e,
+	0xc73211f4,
+	0xd37ee0c5,
+	0x11f40007,
+	0x7e000528,
+	0xf400082d,
+	0xb5c71f11,
+	0x07d37ee0,
+	0x1511f400,
+	0x0006d07e,
+	0xc5c774bd,
+	0x091bf408,
+	0xf40232f4,
+/* 0x09e2: i2c_recv_not_wr08 */
+/* 0x09e2: i2c_recv_done */
+	0xcec7030e,
+	0x08917ef8,
+	0xfce0fc00,
+	0x0912f4d0,
+	0x9f7e7cb2,
+/* 0x09f6: i2c_recv_exit */
+	0x00f80002,
+/* 0x09f8: i2c_init */
+/* 0x09fa: test_recv */
+	0x584100f8,
+	0x0011cf04,
+	0x400110b6,
+	0x01f60458,
+	0xde04bd00,
+	0x134fd900,
+	0x0001de7e,
+/* 0x0a16: test_init */
 	0x004e00f8,
-	0x01f57e08,
-/* 0x0a53: idle_recv */
+	0x01de7e08,
+/* 0x0a1f: idle_recv */
 	0xf800f800,
-/* 0x0a55: idle */
+/* 0x0a21: idle */
 	0x0031f400,
 	0xcf045441,
 	0x10b60011,
 	0x04544001,
 	0xbd0001f6,
-/* 0x0a69: idle_loop */
+/* 0x0a35: idle_loop */
 	0xf4580104,
-/* 0x0a6e: idle_proc */
-/* 0x0a6e: idle_proc_exec */
+/* 0x0a3a: idle_proc */
+/* 0x0a3a: idle_proc_exec */
 	0x10f90232,
-	0xbf7e1eb2,
+	0xa87e1eb2,
 	0x10fc0002,
 	0xf40911f4,
 	0x0ef40231,
-/* 0x0a81: idle_proc_next */
+/* 0x0a4d: idle_proc_next */
 	0x5810b6f0,
 	0x1bf41fa6,
 	0xe002f4e8,
@@ -1726,4 +1713,17 @@
 	0x00000000,
 	0x00000000,
 	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index 5165692..e833418 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -24,8 +24,8 @@
 	0x00000000,
 /* 0x0058: proc_list_head */
 	0x54534f48,
-	0x00000507,
-	0x000004a4,
+	0x0000050a,
+	0x000004a7,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -46,8 +46,8 @@
 	0x00000000,
 	0x00000000,
 	0x584d454d,
-	0x00000837,
-	0x00000829,
+	0x0000083a,
+	0x0000082c,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -68,8 +68,8 @@
 	0x00000000,
 	0x00000000,
 	0x46524550,
-	0x0000083b,
-	0x00000839,
+	0x0000083e,
+	0x0000083c,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -90,8 +90,8 @@
 	0x00000000,
 	0x00000000,
 	0x5f433249,
-	0x00000c6b,
-	0x00000b0e,
+	0x00000c6e,
+	0x00000b11,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -112,8 +112,8 @@
 	0x00000000,
 	0x00000000,
 	0x54534554,
-	0x00000c94,
-	0x00000c6d,
+	0x00000c97,
+	0x00000c70,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -134,8 +134,8 @@
 	0x00000000,
 	0x00000000,
 	0x454c4449,
-	0x00000ca0,
-	0x00000c9e,
+	0x00000ca3,
+	0x00000ca1,
 	0x00000000,
 	0x00000000,
 	0x00000000,
@@ -229,26 +229,26 @@
 /* 0x0370: memx_func_head */
 	0x00000001,
 	0x00000000,
-	0x00000546,
+	0x00000549,
 /* 0x037c: memx_func_next */
 	0x00000002,
 	0x00000000,
-	0x0000059d,
+	0x000005a0,
 	0x00000003,
 	0x00000002,
-	0x0000062f,
+	0x00000632,
 	0x00040004,
 	0x00000000,
-	0x0000064b,
+	0x0000064e,
 	0x00010005,
 	0x00000000,
-	0x00000668,
+	0x0000066b,
 	0x00010006,
 	0x00000000,
-	0x000005ed,
+	0x000005f0,
 	0x00000007,
 	0x00000000,
-	0x00000673,
+	0x00000676,
 /* 0x03c4: memx_func_tail */
 /* 0x03c4: memx_ts_start */
 	0x00000000,
@@ -917,947 +917,947 @@
 };
 
 uint32_t gt215_pmu_code[] = {
-	0x03930ef5,
+	0x03920ef5,
 /* 0x0004: rd32 */
 	0x07a007f1,
 	0xd00604b6,
 	0x04bd000e,
-	0xf001d7f0,
-	0x07f101d3,
-	0x04b607ac,
-	0x000dd006,
-/* 0x0022: rd32_wait */
-	0xd7f104bd,
-	0xd4b607ac,
-	0x00ddcf06,
-	0x7000d4f1,
-	0xf1f21bf4,
-	0xb607a4d7,
-	0xddcf06d4,
-/* 0x003f: wr32 */
-	0xf100f800,
-	0xb607a007,
-	0x0ed00604,
-	0xf104bd00,
-	0xb607a407,
+	0x0001d7f1,
+	0xf101d3f0,
+	0xb607ac07,
 	0x0dd00604,
-	0xf004bd00,
-	0xd5f002d7,
-	0x01d3f0f0,
-	0x07ac07f1,
+/* 0x0023: rd32_wait */
+	0xf104bd00,
+	0xb607acd7,
+	0xddcf06d4,
+	0x00d4f100,
+	0xf21bf470,
+	0x07a4d7f1,
+	0xcf06d4b6,
+	0x00f800dd,
+/* 0x0040: wr32 */
+	0x07a007f1,
+	0xd00604b6,
+	0x04bd000e,
+	0x07a407f1,
 	0xd00604b6,
 	0x04bd000d,
-/* 0x006c: wr32_wait */
-	0x07acd7f1,
-	0xcf06d4b6,
-	0xd4f100dd,
-	0x1bf47000,
-/* 0x007f: nsec */
-	0xf900f8f2,
-	0xf080f990,
-	0x84b62c87,
-	0x0088cf06,
-/* 0x008c: nsec_loop */
-	0xb62c97f0,
-	0x99cf0694,
-	0x0298bb00,
-	0xf4069eb8,
-	0x80fcf11e,
-	0x00f890fc,
-/* 0x00a4: wait */
-	0x80f990f9,
-	0xb62c87f0,
-	0x88cf0684,
-/* 0x00b1: wait_loop */
-	0x02eeb900,
-	0xb90421f4,
-	0xadfd02da,
-	0x06acb804,
-	0xf0150bf4,
+	0x00f2d7f1,
+	0xf101d3f0,
+	0xb607ac07,
+	0x0dd00604,
+/* 0x006b: wr32_wait */
+	0xf104bd00,
+	0xb607acd7,
+	0xddcf06d4,
+	0x00d4f100,
+	0xf21bf470,
+/* 0x007e: nsec */
+	0x90f900f8,
+	0x87f080f9,
+	0x0684b62c,
+/* 0x008b: nsec_loop */
+	0xf00088cf,
 	0x94b62c97,
 	0x0099cf06,
 	0xb80298bb,
-	0x1ef4069b,
-/* 0x00d5: wait_done */
-	0xfc80fcdf,
-/* 0x00db: intr_watchdog */
-	0x9800f890,
-	0x96b003e9,
-	0x2a0bf400,
-	0xbb9a0a98,
-	0x1cf4029a,
-	0x01d7f00f,
-	0x02d221f5,
-	0x0ef494bd,
-/* 0x00f9: intr_watchdog_next_time */
-	0x9b0a9815,
-	0xf400a6b0,
-	0x9ab8090b,
-	0x061cf406,
-/* 0x0108: intr_watchdog_next_time_set */
-/* 0x010b: intr_watchdog_next_proc */
-	0x809b0980,
-	0xe0b603e9,
-	0x68e6b158,
-	0xc61bf402,
-/* 0x011a: intr */
-	0x00f900f8,
-	0x80f904bd,
-	0xa0f990f9,
-	0xc0f9b0f9,
-	0xe0f9d0f9,
-	0xf7f0f0f9,
-	0x0188fe00,
-	0x87f180f9,
-	0x84b605d0,
+	0x1ef4069e,
+	0xfc80fcf1,
+/* 0x00a3: wait */
+	0xf900f890,
+	0xf080f990,
+	0x84b62c87,
 	0x0088cf06,
-	0xf10180b6,
-	0xb605d007,
-	0x08d00604,
-	0xf004bd00,
-	0x84b60887,
-	0x0088cf06,
-	0xf40289c4,
-	0x0080230b,
-	0x58e7f09b,
-	0x98db21f4,
-	0x96b09b09,
-	0x110bf400,
-	0xb63407f0,
-	0x09d00604,
-	0x8004bd00,
-/* 0x017e: intr_skip_watchdog */
-	0x89e49a09,
-	0x0bf40800,
-	0x8897f148,
-	0x0694b606,
-	0xc40099cf,
-	0x0bf4029a,
-	0xc0c7f12c,
-	0x06c4b604,
-	0xf900cccf,
-	0x48e7f1c0,
-	0x53e3f14f,
-	0x00d7f054,
-	0x033721f5,
-	0x07f1c0fc,
-	0x04b604c0,
-	0x000cd006,
-/* 0x01be: intr_subintr_skip_fifo */
-	0x07f104bd,
-	0x04b60688,
-	0x0009d006,
-/* 0x01ca: intr_skip_subintr */
-	0x97f104bd,
-	0x90bd00e0,
-	0xf00489fd,
-	0x04b60407,
+/* 0x00b0: wait_loop */
+	0xf402eeb9,
+	0xdab90421,
+	0x04adfd02,
+	0xf406acb8,
+	0x97f0150b,
+	0x0694b62c,
+	0xbb0099cf,
+	0x9bb80298,
+	0xdf1ef406,
+/* 0x00d4: wait_done */
+	0x90fc80fc,
+/* 0x00da: intr_watchdog */
+	0xe99800f8,
+	0x0096b003,
+	0x982a0bf4,
+	0x9abb9a0a,
+	0x0f1cf402,
+	0xf501d7f0,
+	0xbd02d121,
+	0x150ef494,
+/* 0x00f8: intr_watchdog_next_time */
+	0xb09b0a98,
+	0x0bf400a6,
+	0x069ab809,
+/* 0x0107: intr_watchdog_next_time_set */
+	0x80061cf4,
+/* 0x010a: intr_watchdog_next_proc */
+	0xe9809b09,
+	0x58e0b603,
+	0x0268e6b1,
+	0xf8c61bf4,
+/* 0x0119: intr */
+	0xbd00f900,
+	0xf980f904,
+	0xf9a0f990,
+	0xf9c0f9b0,
+	0xf9e0f9d0,
+	0x00f7f0f0,
+	0xf90188fe,
+	0xd087f180,
+	0x0684b605,
+	0xb60088cf,
+	0x07f10180,
+	0x04b605d0,
 	0x0008d006,
-	0x80fc04bd,
-	0xfc0088fe,
-	0xfce0fcf0,
-	0xfcc0fcd0,
-	0xfca0fcb0,
-	0xfc80fc90,
-	0x0032f400,
-/* 0x01fa: ticks_from_ns */
-	0xc0f901f8,
+	0x87f004bd,
+	0x0684b608,
+	0xc40088cf,
+	0x0bf40289,
+	0x9b008023,
+	0xf458e7f0,
+	0x0998da21,
+	0x0096b09b,
+	0xf0110bf4,
+	0x04b63407,
+	0x0009d006,
+	0x098004bd,
+/* 0x017d: intr_skip_watchdog */
+	0x0089e49a,
+	0x480bf408,
+	0x068897f1,
+	0xcf0694b6,
+	0x9ac40099,
+	0x2c0bf402,
+	0x04c0c7f1,
+	0xcf06c4b6,
+	0xc0f900cc,
+	0x4f48e7f1,
+	0x5453e3f1,
+	0xf500d7f0,
+	0xfc033621,
+	0xc007f1c0,
+	0x0604b604,
+	0xbd000cd0,
+/* 0x01bd: intr_subintr_skip_fifo */
+	0x8807f104,
+	0x0604b606,
+	0xbd0009d0,
+/* 0x01c9: intr_skip_subintr */
+	0xe097f104,
+	0xfd90bd00,
+	0x07f00489,
+	0x0604b604,
+	0xbd0008d0,
+	0xfe80fc04,
+	0xf0fc0088,
+	0xd0fce0fc,
+	0xb0fcc0fc,
+	0x90fca0fc,
+	0x00fc80fc,
+	0xf80032f4,
+/* 0x01f9: ticks_from_ns */
+	0xf9c0f901,
+	0xcbd7f1b0,
+	0x00d3f000,
+	0x040b21f5,
+	0x03e8ccec,
+	0xf400b4b0,
+	0xeeec120b,
+	0xd7f103e8,
+	0xd3f000cb,
+	0x0b21f500,
+/* 0x0221: ticks_from_ns_quit */
+	0x02ceb904,
+	0xc0fcb0fc,
+/* 0x022a: ticks_from_us */
+	0xc0f900f8,
 	0xd7f1b0f9,
 	0xd3f000cb,
-	0x0821f500,
-	0xe8ccec04,
-	0x00b4b003,
-	0xec120bf4,
-	0xf103e8ee,
-	0xf000cbd7,
-	0x21f500d3,
-/* 0x0222: ticks_from_ns_quit */
-	0xceb90408,
-	0xfcb0fc02,
-/* 0x022b: ticks_from_us */
-	0xf900f8c0,
-	0xf1b0f9c0,
-	0xf000cbd7,
-	0x21f500d3,
-	0xceb90408,
-	0x00b4b002,
-	0xbd050bf4,
-/* 0x0245: ticks_from_us_quit */
-	0xfcb0fce4,
-/* 0x024b: ticks_to_us */
-	0xf100f8c0,
-	0xf000cbd7,
-	0xedff00d3,
-/* 0x0257: timer */
-	0xf900f8ec,
-	0xf480f990,
-	0xf8981032,
-	0x0086b003,
-	0xbd651cf4,
-	0x3807f084,
-	0xd00604b6,
-	0x04bd0008,
-	0xb63487f0,
-	0x88cf0684,
-	0x9a099800,
-	0xbb0298bb,
-	0xfe8000e9,
-	0x0887f003,
-	0xcf0684b6,
-	0x84f00088,
-	0x261bf402,
-	0xb63487f0,
-	0x88cf0684,
-	0x06e0b800,
-	0xb8090bf4,
-	0x1cf406e8,
-/* 0x02ad: timer_reset */
-	0x3407f011,
-	0xd00604b6,
-	0x04bd000e,
-/* 0x02bb: timer_enable */
-	0xf09a0e80,
-	0x07f00187,
-	0x0604b638,
-	0xbd0008d0,
-/* 0x02c9: timer_done */
-	0x1031f404,
-	0x90fc80fc,
-/* 0x02d2: send_proc */
-	0x80f900f8,
-	0xe89890f9,
-	0x04e99805,
-	0xb80486f0,
-	0x0bf40689,
-	0x0398c42a,
-	0xb6048894,
-	0x8ebb1880,
-	0x00fa9800,
-	0x80008a80,
-	0x8c80018d,
-	0x038b8002,
-	0xf00190b6,
-	0xe9800794,
-	0x0231f404,
-/* 0x030c: send_done */
-	0x80fc90fc,
-/* 0x0312: find */
-	0x80f900f8,
-	0xf45887f0,
-/* 0x031a: find_loop */
-	0x8a980131,
-	0x06aeb800,
-	0xb6100bf4,
-	0x86b15880,
-	0x1bf40268,
-	0x0132f4f0,
-/* 0x0330: find_done */
-	0xfc028eb9,
-/* 0x0337: send */
-	0xf500f880,
-	0xf4031221,
-	0x00f89701,
-/* 0x0340: recv */
-	0x80f990f9,
-	0x9805e898,
-	0x32f404e9,
-	0x0689b801,
-	0xc43d0bf4,
-	0x80b60389,
-	0x0784f001,
-	0x9805e880,
-	0xf0f902ea,
-	0xf9018ffe,
-	0x02efb9f0,
-	0xbb049994,
-	0xe0b600e9,
-	0x03eb9818,
-	0x9802ec98,
-	0xee9801ed,
-	0xfca5f900,
-	0x00f8fef0,
-	0xfc0131f4,
-/* 0x038d: recv_done */
-	0xfc80fcf0,
-/* 0x0393: init */
-	0xf100f890,
-	0xb6010817,
-	0x11cf0614,
-	0x0911e700,
-	0x0814b601,
-	0xf10014fe,
-	0xf000e017,
-	0x07f00013,
-	0x0604b61c,
-	0xbd0001d0,
-	0xff17f004,
-	0xb61407f0,
-	0x01d00604,
+	0x0b21f500,
+	0x02ceb904,
+	0xf400b4b0,
+	0xe4bd050b,
+/* 0x0244: ticks_from_us_quit */
+	0xc0fcb0fc,
+/* 0x024a: ticks_to_us */
+	0xd7f100f8,
+	0xd3f000cb,
+	0xecedff00,
+/* 0x0256: timer */
+	0x90f900f8,
+	0x32f480f9,
+	0x03f89810,
+	0xf40086b0,
+	0x84bd651c,
+	0xb63807f0,
+	0x08d00604,
 	0xf004bd00,
-	0x15f10217,
-	0x07f00800,
-	0x0604b610,
-	0xbd0001d0,
-	0x1a17f104,
-	0x0013f001,
-	0xf40010fe,
-	0x17f01031,
+	0x84b63487,
+	0x0088cf06,
+	0xbb9a0998,
+	0xe9bb0298,
+	0x03fe8000,
+	0xb60887f0,
+	0x88cf0684,
+	0x0284f000,
+	0xf0261bf4,
+	0x84b63487,
+	0x0088cf06,
+	0xf406e0b8,
+	0xe8b8090b,
+	0x111cf406,
+/* 0x02ac: timer_reset */
+	0xb63407f0,
+	0x0ed00604,
+	0x8004bd00,
+/* 0x02ba: timer_enable */
+	0x87f09a0e,
 	0x3807f001,
 	0xd00604b6,
-	0x04bd0001,
-/* 0x03f7: init_proc */
-	0x9858f7f0,
-	0x16b001f1,
-	0xfa0bf400,
-	0xf0b615f9,
-	0xf20ef458,
-/* 0x0408: mulu32_32_64 */
-	0x20f910f9,
-	0x40f930f9,
-	0x9510e195,
-	0xc4bd10d2,
-	0xedffb4bd,
-	0x301dffc0,
-	0xf10234b9,
-	0xb6ffff34,
-	0x45b61034,
-	0x00c3bb10,
-	0xff01b4bb,
-	0x34b930e2,
-	0xff34f102,
-	0x1034b6ff,
-	0xbb1045b6,
-	0xb4bb00c3,
-	0x3012ff01,
-	0xfc00b3bb,
-	0xfc30fc40,
-	0xf810fc20,
-/* 0x0459: host_send */
-	0xb017f100,
-	0x0614b604,
-	0xf10011cf,
-	0xb604a027,
-	0x22cf0624,
-	0x0612b800,
-	0xc4320bf4,
-	0xee94071e,
-	0x70e0b704,
-	0x03eb9802,
-	0x9802ec98,
-	0xee9801ed,
-	0x3721f500,
-	0x0110b603,
-	0xf10f1ec4,
-	0xb604b007,
-	0x0ed00604,
-	0xf404bd00,
-/* 0x04a2: host_send_done */
-	0x00f8ba0e,
-/* 0x04a4: host_recv */
-	0x4e4917f1,
-	0x525413f1,
-	0xf406e1b8,
-/* 0x04b2: host_recv_wait */
-	0x17f1aa0b,
-	0x14b604cc,
+	0x04bd0008,
+/* 0x02c8: timer_done */
+	0xfc1031f4,
+	0xf890fc80,
+/* 0x02d1: send_proc */
+	0xf980f900,
+	0x05e89890,
+	0xf004e998,
+	0x89b80486,
+	0x2a0bf406,
+	0x940398c4,
+	0x80b60488,
+	0x008ebb18,
+	0x8000fa98,
+	0x8d80008a,
+	0x028c8001,
+	0xb6038b80,
+	0x94f00190,
+	0x04e98007,
+/* 0x030b: send_done */
+	0xfc0231f4,
+	0xf880fc90,
+/* 0x0311: find */
+	0xf080f900,
+	0x31f45887,
+/* 0x0319: find_loop */
+	0x008a9801,
+	0xf406aeb8,
+	0x80b6100b,
+	0x6886b158,
+	0xf01bf402,
+/* 0x032f: find_done */
+	0xb90132f4,
+	0x80fc028e,
+/* 0x0336: send */
+	0x21f500f8,
+	0x01f40311,
+/* 0x033f: recv */
+	0xf900f897,
+	0x9880f990,
+	0xe99805e8,
+	0x0132f404,
+	0xf40689b8,
+	0x89c43d0b,
+	0x0180b603,
+	0x800784f0,
+	0xea9805e8,
+	0xfef0f902,
+	0xf0f9018f,
+	0x9402efb9,
+	0xe9bb0499,
+	0x18e0b600,
+	0x9803eb98,
+	0xed9802ec,
+	0x00ee9801,
+	0xf0fca5f9,
+	0xf400f8fe,
+	0xf0fc0131,
+/* 0x038c: recv_done */
+	0x90fc80fc,
+/* 0x0392: init */
+	0x17f100f8,
+	0x14b60108,
 	0x0011cf06,
-	0x04c827f1,
-	0xcf0624b6,
-	0x16f00022,
-	0x0612b808,
-	0xc4e60bf4,
-	0x34b60723,
-	0xf030b704,
-	0x033b8002,
-	0x80023c80,
-	0x3e80013d,
-	0x0120b600,
-	0xf10f24f0,
-	0xb604c807,
-	0x02d00604,
-	0xf004bd00,
-	0x07f04027,
-	0x0604b600,
-	0xbd0002d0,
-/* 0x0507: host_init */
-	0xf100f804,
-	0xb6008017,
-	0x15f11014,
-	0x07f10270,
-	0x04b604d0,
-	0x0001d006,
-	0x17f104bd,
-	0x14b60080,
-	0xf015f110,
-	0xdc07f102,
-	0x0604b604,
-	0xbd0001d0,
-	0x0117f004,
-	0x04c407f1,
+	0x010911e7,
+	0xfe0814b6,
+	0x17f10014,
+	0x13f000e0,
+	0x1c07f000,
 	0xd00604b6,
 	0x04bd0001,
-/* 0x0546: memx_func_enter */
-	0x87f100f8,
-	0x8eb91610,
-	0x0421f402,
-	0xf102d7b9,
-	0xf1fffc67,
-	0xfdffff63,
-	0x67f10476,
-	0x76fd0002,
-	0xf980f905,
-	0xfcd0fc70,
-	0x3f21f4e0,
+	0xf0ff17f0,
+	0x04b61407,
+	0x0001d006,
+	0x17f004bd,
+	0x0015f102,
+	0x1007f008,
+	0xd00604b6,
+	0x04bd0001,
+	0x011917f1,
+	0xf10013f0,
+	0xfeffff14,
+	0x31f40010,
+	0x0117f010,
+	0xb63807f0,
+	0x01d00604,
+	0xf004bd00,
+/* 0x03fa: init_proc */
+	0xf19858f7,
+	0x0016b001,
+	0xf9fa0bf4,
+	0x58f0b615,
+/* 0x040b: mulu32_32_64 */
+	0xf9f20ef4,
+	0xf920f910,
+	0x9540f930,
+	0xd29510e1,
+	0xbdc4bd10,
+	0xc0edffb4,
+	0xb9301dff,
+	0x34f10234,
+	0x34b6ffff,
+	0x1045b610,
+	0xbb00c3bb,
+	0xe2ff01b4,
+	0x0234b930,
+	0xffff34f1,
+	0xb61034b6,
+	0xc3bb1045,
+	0x01b4bb00,
+	0xbb3012ff,
+	0x40fc00b3,
+	0x20fc30fc,
+	0x00f810fc,
+/* 0x045c: host_send */
+	0x04b017f1,
+	0xcf0614b6,
+	0x27f10011,
+	0x24b604a0,
+	0x0022cf06,
+	0xf40612b8,
+	0x1ec4320b,
+	0x04ee9407,
+	0x0270e0b7,
+	0x9803eb98,
+	0xed9802ec,
+	0x00ee9801,
+	0x033621f5,
+	0xc40110b6,
+	0x07f10f1e,
+	0x04b604b0,
+	0x000ed006,
+	0x0ef404bd,
+/* 0x04a5: host_send_done */
+/* 0x04a7: host_recv */
+	0xf100f8ba,
+	0xf14e4917,
+	0xb8525413,
+	0x0bf406e1,
+/* 0x04b5: host_recv_wait */
+	0xcc17f1aa,
+	0x0614b604,
+	0xf10011cf,
+	0xb604c827,
+	0x22cf0624,
+	0x0816f000,
+	0xf40612b8,
+	0x23c4e60b,
+	0x0434b607,
+	0x02f030b7,
+	0x80033b80,
+	0x3d80023c,
+	0x003e8001,
+	0xf00120b6,
+	0x07f10f24,
+	0x04b604c8,
+	0x0002d006,
+	0x27f004bd,
+	0x0007f040,
+	0xd00604b6,
+	0x04bd0002,
+/* 0x050a: host_init */
+	0x17f100f8,
+	0x14b60080,
+	0x7015f110,
+	0xd007f102,
+	0x0604b604,
+	0xbd0001d0,
+	0x8017f104,
+	0x1014b600,
+	0x02f015f1,
+	0x04dc07f1,
+	0xd00604b6,
+	0x04bd0001,
+	0xf10117f0,
+	0xb604c407,
+	0x01d00604,
+	0xf804bd00,
+/* 0x0549: memx_func_enter */
+	0x1087f100,
+	0x028eb916,
+	0xb90421f4,
+	0x67f102d7,
+	0x63f1fffc,
+	0x76fdffff,
+	0x0267f104,
+	0x0576fd00,
+	0x70f980f9,
+	0xe0fcd0fc,
+	0xf04021f4,
+	0x07f10467,
+	0x04b607e0,
+	0x0006d006,
+/* 0x0582: memx_func_enter_wait */
+	0x67f104bd,
+	0x64b607c0,
+	0x0066cf06,
+	0xf40464f0,
+	0x67f0f30b,
+	0x0664b62c,
+	0x800066cf,
+	0x00f8f106,
+/* 0x05a0: memx_func_leave */
+	0xb62c67f0,
+	0x66cf0664,
+	0xf2068000,
 	0xf10467f0,
-	0xb607e007,
+	0xb607e407,
 	0x06d00604,
-/* 0x057f: memx_func_enter_wait */
+/* 0x05bb: memx_func_leave_wait */
 	0xf104bd00,
 	0xb607c067,
 	0x66cf0664,
 	0x0464f000,
-	0xf0f30bf4,
-	0x64b62c67,
-	0x0066cf06,
-	0xf8f10680,
-/* 0x059d: memx_func_leave */
-	0x2c67f000,
-	0xcf0664b6,
-	0x06800066,
-	0x0467f0f2,
-	0x07e407f1,
-	0xd00604b6,
-	0x04bd0006,
-/* 0x05b8: memx_func_leave_wait */
-	0x07c067f1,
-	0xcf0664b6,
-	0x64f00066,
-	0xf31bf404,
-	0x161087f1,
-	0xf4028eb9,
-	0xd7b90421,
-	0xcc67f102,
-	0xff63f1ff,
-	0x0476fdff,
-	0x70f980f9,
-	0xe0fcd0fc,
-	0xf83f21f4,
-/* 0x05ed: memx_func_wait_vblank */
-	0x00169800,
-	0xf40066b0,
-	0x66b0130b,
-	0x060bf401,
-/* 0x05ff: memx_func_wait_vblank_head1 */
-	0xf12e0ef4,
-	0xf4002077,
-/* 0x0606: memx_func_wait_vblank_head0 */
-	0x77f1070e,
-/* 0x060a: memx_func_wait_vblank_0 */
-	0x67f10008,
-	0x64b607c4,
-	0x0066cf06,
-	0xf40467fd,
-/* 0x061a: memx_func_wait_vblank_1 */
-	0x67f1f31b,
-	0x64b607c4,
-	0x0066cf06,
-	0xf40467fd,
-/* 0x062a: memx_func_wait_vblank_fini */
-	0x10b6f30b,
-/* 0x062f: memx_func_wr32 */
-	0x9800f804,
-	0x15980016,
-	0x0810b601,
-	0x50f960f9,
-	0xe0fcd0fc,
-	0xb63f21f4,
-	0x1bf40242,
-/* 0x064b: memx_func_wait */
-	0xf000f8e9,
-	0x84b62c87,
-	0x0088cf06,
-	0x98001e98,
-	0x1c98011d,
-	0x031b9802,
-	0xf41010b6,
-	0x00f8a421,
-/* 0x0668: memx_func_delay */
-	0xb6001e98,
-	0x21f40410,
-/* 0x0673: memx_func_train */
-	0xf100f87f,
-	0xf1000357,
-	0xf1000077,
-	0xf0000097,
-	0x9eb97093,
-	0x0421f402,
-	0xf102d8b9,
-	0xf42710e7,
-/* 0x0692: memx_func_train_loop_outer */
-	0x58e07f21,
-	0x83f10101,
-	0x97f10200,
-	0x93f011e0,
-	0xf990f911,
-	0xfcd0fc80,
-	0x3f21f4e0,
-	0x67f150f9,
-/* 0x06b2: memx_func_train_loop_inner */
-	0x87f10000,
-	0x68ff1111,
-	0x10989490,
-	0xf10589fd,
-	0xf0072097,
-	0x90f91093,
-	0xd0fc80f9,
-	0x21f4e0fc,
-	0x8097f13f,
-	0x1093f000,
-	0xf4029eb9,
-	0xd8b90421,
-	0x2088c502,
+	0xf1f31bf4,
+	0xb9161087,
+	0x21f4028e,
+	0x02d7b904,
+	0xffcc67f1,
+	0xffff63f1,
+	0xf90476fd,
+	0xfc70f980,
+	0xf4e0fcd0,
+	0x00f84021,
+/* 0x05f0: memx_func_wait_vblank */
+	0xb0001698,
+	0x0bf40066,
+	0x0166b013,
+	0xf4060bf4,
+/* 0x0602: memx_func_wait_vblank_head1 */
+	0x77f12e0e,
+	0x0ef40020,
+/* 0x0609: memx_func_wait_vblank_head0 */
+	0x0877f107,
+/* 0x060d: memx_func_wait_vblank_0 */
+	0xc467f100,
+	0x0664b607,
+	0xfd0066cf,
+	0x1bf40467,
+/* 0x061d: memx_func_wait_vblank_1 */
+	0xc467f1f3,
+	0x0664b607,
+	0xfd0066cf,
+	0x0bf40467,
+/* 0x062d: memx_func_wait_vblank_fini */
+	0x0410b6f3,
+/* 0x0632: memx_func_wr32 */
+	0x169800f8,
+	0x01159800,
+	0xf90810b6,
+	0xfc50f960,
+	0xf4e0fcd0,
+	0x42b64021,
+	0xe91bf402,
+/* 0x064e: memx_func_wait */
+	0x87f000f8,
+	0x0684b62c,
+	0x980088cf,
+	0x1d98001e,
+	0x021c9801,
+	0xb6031b98,
+	0x21f41010,
+/* 0x066b: memx_func_delay */
+	0x9800f8a3,
+	0x10b6001e,
+	0x7e21f404,
+/* 0x0676: memx_func_train */
+	0x57f100f8,
+	0x77f10003,
+	0x97f10000,
+	0x93f00000,
+	0x029eb970,
+	0xb90421f4,
+	0xe7f102d8,
+	0x21f42710,
+/* 0x0695: memx_func_train_loop_outer */
+	0x0158e07e,
+	0x0083f101,
+	0xe097f102,
+	0x1193f011,
 	0x80f990f9,
 	0xe0fcd0fc,
-	0xf13f21f4,
-	0xf0053c97,
-	0x87f11093,
-	0x83f13002,
-	0x90f98000,
-	0xd0fc80f9,
-	0x21f4e0fc,
-	0x60e7f13f,
-	0x10e3f005,
-	0x0000d7f1,
-	0x8000d3f1,
-	0xf100dc90,
-	0xf08480b7,
-	0x21f41eb3,
-	0x0057f1a4,
-	0xff97f100,
-	0x0093f1ff,
-/* 0x0731: memx_func_train_loop_4x */
-	0x80a7f183,
-	0x10a3f000,
-	0xf402aeb9,
-	0xd8b90421,
-	0xdfb7f102,
-	0xffb3f1ff,
-	0x048bfdff,
-	0x80f9a0f9,
-	0xe0fcd0fc,
-	0xf13f21f4,
-	0xf0053ca7,
-	0x87f110a3,
-	0x83f13002,
-	0xa0f98000,
-	0xd0fc80f9,
-	0x21f4e0fc,
-	0x60e7f13f,
-	0x10e3f005,
-	0x0000d7f1,
-	0x8000d3f1,
-	0xf102dcb9,
-	0xf02710b7,
-	0x21f400b3,
-	0x02eeb9a4,
-	0xb90421f4,
-	0x9dff02dd,
-	0x0150b694,
-	0xf4045670,
-	0x7aa0921e,
-	0xa9800bcc,
-	0x0160b600,
-	0x700470b6,
-	0x1ef51066,
-	0x50fcff00,
+	0xf94021f4,
+	0x0067f150,
+/* 0x06b5: memx_func_train_loop_inner */
+	0x1187f100,
+	0x9068ff11,
+	0xfd109894,
+	0x97f10589,
+	0x93f00720,
+	0xf990f910,
+	0xfcd0fc80,
+	0x4021f4e0,
+	0x008097f1,
+	0xb91093f0,
+	0x21f4029e,
+	0x02d8b904,
+	0xf92088c5,
+	0xfc80f990,
+	0xf4e0fcd0,
+	0x97f14021,
+	0x93f0053c,
+	0x0287f110,
+	0x0083f130,
+	0xf990f980,
+	0xfcd0fc80,
+	0x4021f4e0,
+	0x0560e7f1,
+	0xf110e3f0,
+	0xf10000d7,
+	0x908000d3,
+	0xb7f100dc,
+	0xb3f08480,
+	0xa321f41e,
+	0x000057f1,
+	0xffff97f1,
+	0x830093f1,
+/* 0x0734: memx_func_train_loop_4x */
+	0x0080a7f1,
+	0xb910a3f0,
+	0x21f402ae,
+	0x02d8b904,
+	0xffdfb7f1,
+	0xffffb3f1,
+	0xf9048bfd,
+	0xfc80f9a0,
+	0xf4e0fcd0,
+	0xa7f14021,
+	0xa3f0053c,
+	0x0287f110,
+	0x0083f130,
+	0xf9a0f980,
+	0xfcd0fc80,
+	0x4021f4e0,
+	0x0560e7f1,
+	0xf110e3f0,
+	0xf10000d7,
+	0xb98000d3,
+	0xb7f102dc,
+	0xb3f02710,
+	0xa321f400,
+	0xf402eeb9,
+	0xddb90421,
+	0x949dff02,
 	0x700150b6,
-	0x1ef50756,
-	0x00f8fed4,
-/* 0x07c4: memx_exec */
-	0xd0f9e0f9,
-	0xb902c1b9,
-/* 0x07ce: memx_exec_next */
-	0x139802b2,
-	0x0410b600,
-	0x01f034e7,
-	0x01e033e7,
-	0xf00132b6,
-	0x35980c30,
-	0xb855f9de,
-	0x1ef40612,
-	0xf10b98e4,
-	0xbbf20c98,
-	0xb7f102cb,
-	0xb4b607c4,
-	0x00bbcf06,
-	0xe0fcd0fc,
-	0x033721f5,
-/* 0x080a: memx_info */
-	0xc67000f8,
-	0x0e0bf401,
-/* 0x0810: memx_info_data */
-	0x03ccc7f1,
-	0x0800b7f1,
-/* 0x081b: memx_info_train */
-	0xf10b0ef4,
-	0xf10bccc7,
-/* 0x0823: memx_info_send */
-	0xf50100b7,
-	0xf8033721,
-/* 0x0829: memx_recv */
-	0x01d6b000,
-	0xb0980bf4,
-	0x0bf400d6,
-/* 0x0837: memx_init */
-	0xf800f8d8,
-/* 0x0839: perf_recv */
-/* 0x083b: perf_init */
-	0xf800f800,
-/* 0x083d: i2c_drive_scl */
-	0x0036b000,
-	0xf1110bf4,
-	0xb607e007,
-	0x01d00604,
-	0xf804bd00,
-/* 0x0851: i2c_drive_scl_lo */
-	0xe407f100,
-	0x0604b607,
-	0xbd0001d0,
-/* 0x085f: i2c_drive_sda */
-	0xb000f804,
-	0x0bf40036,
-	0xe007f111,
-	0x0604b607,
-	0xbd0002d0,
-/* 0x0873: i2c_drive_sda_lo */
-	0xf100f804,
-	0xb607e407,
-	0x02d00604,
-	0xf804bd00,
-/* 0x0881: i2c_sense_scl */
-	0x0132f400,
-	0x07c437f1,
-	0xcf0634b6,
-	0x31fd0033,
-	0x060bf404,
-/* 0x0897: i2c_sense_scl_done */
-	0xf80131f4,
-/* 0x0899: i2c_sense_sda */
-	0x0132f400,
-	0x07c437f1,
-	0xcf0634b6,
-	0x32fd0033,
-	0x060bf404,
-/* 0x08af: i2c_sense_sda_done */
-	0xf80131f4,
-/* 0x08b1: i2c_raise_scl */
-	0xf140f900,
-	0xf0089847,
-	0x21f50137,
-/* 0x08be: i2c_raise_scl_wait */
-	0xe7f1083d,
-	0x21f403e8,
-	0x8121f57f,
-	0x0901f408,
-	0xf40142b6,
-/* 0x08d2: i2c_raise_scl_done */
-	0x40fcef1b,
-/* 0x08d6: i2c_start */
-	0x21f500f8,
-	0x11f40881,
-	0x9921f50d,
-	0x0611f408,
-/* 0x08e7: i2c_start_rep */
-	0xf0300ef4,
+	0x1ef40456,
+	0xcc7aa092,
+	0x00a9800b,
+	0xb60160b6,
+	0x66700470,
+	0x001ef510,
+	0xb650fcff,
+	0x56700150,
+	0xd41ef507,
+/* 0x07c7: memx_exec */
+	0xf900f8fe,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x07d1: memx_exec_next */
+	0x00139802,
+	0xe70410b6,
+	0xe701f034,
+	0xb601e033,
+	0x30f00132,
+	0xde35980c,
+	0x12b855f9,
+	0xe41ef406,
+	0x98f10b98,
+	0xcbbbf20c,
+	0xc4b7f102,
+	0x06b4b607,
+	0xfc00bbcf,
+	0xf5e0fcd0,
+	0xf8033621,
+/* 0x080d: memx_info */
+	0x01c67000,
+/* 0x0813: memx_info_data */
+	0xf10e0bf4,
+	0xf103ccc7,
+	0xf40800b7,
+/* 0x081e: memx_info_train */
+	0xc7f10b0e,
+	0xb7f10bcc,
+/* 0x0826: memx_info_send */
+	0x21f50100,
+	0x00f80336,
+/* 0x082c: memx_recv */
+	0xf401d6b0,
+	0xd6b0980b,
+	0xd80bf400,
+/* 0x083a: memx_init */
+	0x00f800f8,
+/* 0x083c: perf_recv */
+/* 0x083e: perf_init */
+	0x00f800f8,
+/* 0x0840: i2c_drive_scl */
+	0xf40036b0,
+	0x07f1110b,
+	0x04b607e0,
+	0x0001d006,
+	0x00f804bd,
+/* 0x0854: i2c_drive_scl_lo */
+	0x07e407f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x0862: i2c_drive_sda */
+	0x36b000f8,
+	0x110bf400,
+	0x07e007f1,
+	0xd00604b6,
+	0x04bd0002,
+/* 0x0876: i2c_drive_sda_lo */
+	0x07f100f8,
+	0x04b607e4,
+	0x0002d006,
+	0x00f804bd,
+/* 0x0884: i2c_sense_scl */
+	0xf10132f4,
+	0xb607c437,
+	0x33cf0634,
+	0x0431fd00,
+	0xf4060bf4,
+/* 0x089a: i2c_sense_scl_done */
+	0x00f80131,
+/* 0x089c: i2c_sense_sda */
+	0xf10132f4,
+	0xb607c437,
+	0x33cf0634,
+	0x0432fd00,
+	0xf4060bf4,
+/* 0x08b2: i2c_sense_sda_done */
+	0x00f80131,
+/* 0x08b4: i2c_raise_scl */
+	0x47f140f9,
+	0x37f00898,
+	0x4021f501,
+/* 0x08c1: i2c_raise_scl_wait */
+	0xe8e7f108,
+	0x7e21f403,
+	0x088421f5,
+	0xb60901f4,
+	0x1bf40142,
+/* 0x08d5: i2c_raise_scl_done */
+	0xf840fcef,
+/* 0x08d9: i2c_start */
+	0x8421f500,
+	0x0d11f408,
+	0x089c21f5,
+	0xf40611f4,
+/* 0x08ea: i2c_start_rep */
+	0x37f0300e,
+	0x4021f500,
+	0x0137f008,
+	0x086221f5,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xb421f550,
+	0x0464b608,
+/* 0x0917: i2c_start_send */
+	0xf01f11f4,
 	0x21f50037,
-	0x37f0083d,
-	0x5f21f501,
-	0x0076bb08,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b608b1,
-	0x1f11f404,
-/* 0x0914: i2c_start_send */
-	0xf50037f0,
-	0xf1085f21,
-	0xf41388e7,
-	0x37f07f21,
-	0x3d21f500,
-	0x88e7f108,
-	0x7f21f413,
-/* 0x0930: i2c_start_out */
-/* 0x0932: i2c_stop */
-	0x37f000f8,
-	0x3d21f500,
-	0x0037f008,
-	0x085f21f5,
-	0x03e8e7f1,
-	0xf07f21f4,
-	0x21f50137,
-	0xe7f1083d,
+	0xe7f10862,
 	0x21f41388,
-	0x0137f07f,
-	0x085f21f5,
+	0x0037f07e,
+	0x084021f5,
 	0x1388e7f1,
-	0xf87f21f4,
-/* 0x0965: i2c_bitw */
-	0x5f21f500,
-	0xe8e7f108,
-	0x7f21f403,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xb121f550,
-	0x0464b608,
-	0xf11811f4,
-	0xf41388e7,
-	0x37f07f21,
-	0x3d21f500,
+/* 0x0933: i2c_start_out */
+	0xf87e21f4,
+/* 0x0935: i2c_stop */
+	0x0037f000,
+	0x084021f5,
+	0xf50037f0,
+	0xf1086221,
+	0xf403e8e7,
+	0x37f07e21,
+	0x4021f501,
 	0x88e7f108,
-	0x7f21f413,
-/* 0x09a4: i2c_bitw_out */
-/* 0x09a6: i2c_bitr */
-	0x37f000f8,
-	0x5f21f501,
-	0xe8e7f108,
-	0x7f21f403,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xb121f550,
-	0x0464b608,
-	0xf51b11f4,
-	0xf0089921,
-	0x21f50037,
-	0xe7f1083d,
+	0x7e21f413,
+	0xf50137f0,
+	0xf1086221,
+	0xf41388e7,
+	0x00f87e21,
+/* 0x0968: i2c_bitw */
+	0x086221f5,
+	0x03e8e7f1,
+	0xbb7e21f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x08b421f5,
+	0xf40464b6,
+	0xe7f11811,
 	0x21f41388,
-	0x013cf07f,
-/* 0x09eb: i2c_bitr_done */
-	0xf80131f4,
-/* 0x09ed: i2c_get_byte */
-	0x0057f000,
-/* 0x09f3: i2c_get_byte_next */
-	0xb60847f0,
-	0x76bb0154,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb609a621,
-	0x11f40464,
-	0x0553fd2b,
-	0xf40142b6,
-	0x37f0d81b,
+	0x0037f07e,
+	0x084021f5,
+	0x1388e7f1,
+/* 0x09a7: i2c_bitw_out */
+	0xf87e21f4,
+/* 0x09a9: i2c_bitr */
+	0x0137f000,
+	0x086221f5,
+	0x03e8e7f1,
+	0xbb7e21f4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x08b421f5,
+	0xf40464b6,
+	0x21f51b11,
+	0x37f0089c,
+	0x4021f500,
+	0x88e7f108,
+	0x7e21f413,
+	0xf4013cf0,
+/* 0x09ee: i2c_bitr_done */
+	0x00f80131,
+/* 0x09f0: i2c_get_byte */
+	0xf00057f0,
+/* 0x09f6: i2c_get_byte_next */
+	0x54b60847,
 	0x0076bb01,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b60965,
-/* 0x0a3d: i2c_get_byte_done */
-/* 0x0a3f: i2c_put_byte */
-	0xf000f804,
-/* 0x0a42: i2c_put_byte_next */
-	0x42b60847,
-	0x3854ff01,
+	0x64b609a9,
+	0x2b11f404,
+	0xb60553fd,
+	0x1bf40142,
+	0x0137f0d8,
 	0xb60076bb,
 	0x50f90465,
 	0xbb046594,
 	0x50bd0256,
 	0xfc0475fd,
-	0x6521f550,
+	0x6821f550,
 	0x0464b609,
-	0xb03411f4,
-	0x1bf40046,
-	0x0076bbd8,
+/* 0x0a40: i2c_get_byte_done */
+/* 0x0a42: i2c_put_byte */
+	0x47f000f8,
+/* 0x0a45: i2c_put_byte_next */
+	0x0142b608,
+	0xbb3854ff,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x096821f5,
+	0xf40464b6,
+	0x46b03411,
+	0xd81bf400,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xa921f550,
+	0x0464b609,
+	0xbb0f11f4,
+	0x36b00076,
+	0x061bf401,
+/* 0x0a9b: i2c_put_byte_done */
+	0xf80132f4,
+/* 0x0a9d: i2c_addr */
+	0x0076bb00,
 	0xf90465b6,
 	0x04659450,
 	0xbd0256bb,
 	0x0475fd50,
 	0x21f550fc,
-	0x64b609a6,
-	0x0f11f404,
-	0xb00076bb,
-	0x1bf40136,
-	0x0132f406,
-/* 0x0a98: i2c_put_byte_done */
-/* 0x0a9a: i2c_addr */
-	0x76bb00f8,
+	0x64b608d9,
+	0x2911f404,
+	0x012ec3e7,
+	0xfd0134b6,
+	0x76bb0553,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb608d621,
-	0x11f40464,
-	0x2ec3e729,
-	0x0134b601,
-	0xbb0553fd,
+	0xb60a4221,
+/* 0x0ae2: i2c_addr_done */
+	0x00f80464,
+/* 0x0ae4: i2c_acquire_addr */
+	0xb6f8cec7,
+	0xe0b702e4,
+	0xee980d1c,
+/* 0x0af3: i2c_acquire */
+	0xf500f800,
+	0xf40ae421,
+	0xd9f00421,
+	0x4021f403,
+/* 0x0b02: i2c_release */
+	0x21f500f8,
+	0x21f40ae4,
+	0x03daf004,
+	0xf84021f4,
+/* 0x0b11: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x3a1ff528,
+	0xf413a001,
+	0x0032980c,
+	0x0ccc13a0,
+	0xf4003198,
+	0xd0f90231,
+	0xd0f9e0f9,
+	0x000067f1,
+	0x100063f1,
+	0xbb016792,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0a3f21f5,
-/* 0x0adf: i2c_addr_done */
-	0xf80464b6,
-/* 0x0ae1: i2c_acquire_addr */
-	0xf8cec700,
-	0xb702e4b6,
-	0x980d1ce0,
-	0x00f800ee,
-/* 0x0af0: i2c_acquire */
-	0x0ae121f5,
-	0xf00421f4,
-	0x21f403d9,
-/* 0x0aff: i2c_release */
-	0xf500f83f,
-	0xf40ae121,
-	0xdaf00421,
-	0x3f21f403,
-/* 0x0b0e: i2c_recv */
-	0x32f400f8,
-	0xf8c1c701,
-	0xb00214b6,
-	0x1ff52816,
-	0x13a0013a,
-	0x32980cf4,
-	0xcc13a000,
-	0x0031980c,
-	0xf90231f4,
-	0xf9e0f9d0,
-	0x0067f1d0,
-	0x0063f100,
-	0x01679210,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xf021f550,
-	0x0464b60a,
-	0xd6b0d0fc,
-	0xb31bf500,
-	0x0057f000,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x9a21f550,
-	0x0464b60a,
-	0x00d011f5,
-	0xbbe0c5c7,
+	0x0af321f5,
+	0xfc0464b6,
+	0x00d6b0d0,
+	0x00b31bf5,
+	0xbb0057f0,
 	0x65b60076,
 	0x9450f904,
 	0x56bb0465,
 	0xfd50bd02,
 	0x50fc0475,
-	0x0a3f21f5,
+	0x0a9d21f5,
 	0xf50464b6,
-	0xf000ad11,
-	0x76bb0157,
+	0xc700d011,
+	0x76bbe0c5,
 	0x0465b600,
 	0x659450f9,
 	0x0256bb04,
 	0x75fd50bd,
 	0xf550fc04,
-	0xb60a9a21,
+	0xb60a4221,
 	0x11f50464,
-	0x76bb008a,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb609ed21,
-	0x11f40464,
-	0xe05bcb6a,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x3221f550,
-	0x0464b609,
-	0xbd025bb9,
-	0x430ef474,
-/* 0x0c14: i2c_recv_not_rd08 */
-	0xf401d6b0,
-	0x57f03d1b,
-	0x9a21f500,
-	0x3311f40a,
-	0xf5e0c5c7,
-	0xf40a3f21,
-	0x57f02911,
-	0x9a21f500,
-	0x1f11f40a,
-	0xf5e0b5c7,
-	0xf40a3f21,
-	0x21f51511,
-	0x74bd0932,
-	0xf408c5c7,
-	0x32f4091b,
-	0x030ef402,
-/* 0x0c54: i2c_recv_not_wr08 */
-/* 0x0c54: i2c_recv_done */
-	0xf5f8cec7,
-	0xfc0aff21,
-	0xf4d0fce0,
-	0x7cb90a12,
-	0x3721f502,
-/* 0x0c69: i2c_recv_exit */
-/* 0x0c6b: i2c_init */
-	0xf800f803,
-/* 0x0c6d: test_recv */
-	0xd817f100,
-	0x0614b605,
-	0xb60011cf,
-	0x07f10110,
-	0x04b605d8,
-	0x0001d006,
-	0xe7f104bd,
-	0xe3f1d900,
-	0x21f5134f,
-	0x00f80257,
-/* 0x0c94: test_init */
-	0x0800e7f1,
-	0x025721f5,
-/* 0x0c9e: idle_recv */
+	0x57f000ad,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60a9d,
+	0x8a11f504,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b609f0,
+	0x6a11f404,
+	0xbbe05bcb,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x093521f5,
+	0xb90464b6,
+	0x74bd025b,
+/* 0x0c17: i2c_recv_not_rd08 */
+	0xb0430ef4,
+	0x1bf401d6,
+	0x0057f03d,
+	0x0a9d21f5,
+	0xc73311f4,
+	0x21f5e0c5,
+	0x11f40a42,
+	0x0057f029,
+	0x0a9d21f5,
+	0xc71f11f4,
+	0x21f5e0b5,
+	0x11f40a42,
+	0x3521f515,
+	0xc774bd09,
+	0x1bf408c5,
+	0x0232f409,
+/* 0x0c57: i2c_recv_not_wr08 */
+/* 0x0c57: i2c_recv_done */
+	0xc7030ef4,
+	0x21f5f8ce,
+	0xe0fc0b02,
+	0x12f4d0fc,
+	0x027cb90a,
+	0x033621f5,
+/* 0x0c6c: i2c_recv_exit */
+/* 0x0c6e: i2c_init */
 	0x00f800f8,
-/* 0x0ca0: idle */
-	0xf10031f4,
-	0xb605d417,
-	0x11cf0614,
-	0x0110b600,
-	0x05d407f1,
-	0xd00604b6,
-	0x04bd0001,
-/* 0x0cbc: idle_loop */
-	0xf45817f0,
-/* 0x0cc2: idle_proc */
-/* 0x0cc2: idle_proc_exec */
-	0x10f90232,
-	0xf5021eb9,
-	0xfc034021,
-	0x0911f410,
-	0xf40231f4,
-/* 0x0cd6: idle_proc_next */
-	0x10b6ef0e,
-	0x061fb858,
-	0xf4e61bf4,
-	0x28f4dd02,
-	0xbb0ef400,
-	0x00000000,
+/* 0x0c70: test_recv */
+	0x05d817f1,
+	0xcf0614b6,
+	0x10b60011,
+	0xd807f101,
+	0x0604b605,
+	0xbd0001d0,
+	0x00e7f104,
+	0x4fe3f1d9,
+	0x5621f513,
+/* 0x0c97: test_init */
+	0xf100f802,
+	0xf50800e7,
+	0xf8025621,
+/* 0x0ca1: idle_recv */
+/* 0x0ca3: idle */
+	0xf400f800,
+	0x17f10031,
+	0x14b605d4,
+	0x0011cf06,
+	0xf10110b6,
+	0xb605d407,
+	0x01d00604,
+/* 0x0cbf: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x0cc5: idle_proc */
+/* 0x0cc5: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc033f,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x0cd9: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00bb0ef4,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc
index c2bb616a..f2420a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc
@@ -98,8 +98,7 @@
 // $r0  - zero
 host_recv:
 	// message from intr handler == HOST->PWR comms pending
-	mov $r1 (PROC_KERN & 0x0000ffff)
-	sethi $r1 (PROC_KERN & 0xffff0000)
+	imm32($r1, PROC_KERN)
 	cmp b32 $r14 $r1
 	bra e #host_send
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
index ad35fa5..c20a3bd3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
@@ -51,8 +51,7 @@
 // $r0  - zero
 rd32:
 	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
-	mov $r13 NV_PPWR_MMIO_CTRL_OP_RD
-	sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
+	imm32($r13, NV_PPWR_MMIO_CTRL_OP_RD | NV_PPWR_MMIO_CTRL_TRIGGER)
 	nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
 	rd32_wait:
 		nv_iord($r13, NV_PPWR_MMIO_CTRL)
@@ -70,9 +69,7 @@
 wr32:
 	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
 	nv_iowr(NV_PPWR_MMIO_DATA, $r13)
-	mov $r13 NV_PPWR_MMIO_CTRL_OP_WR
-	or $r13 NV_PPWR_MMIO_CTRL_MASK_B32_0
-	sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
+	imm32($r13, NV_PPWR_MMIO_CTRL_OP_WR | NV_PPWR_MMIO_CTRL_MASK_B32_0 | NV_PPWR_MMIO_CTRL_TRIGGER)
 
 #ifdef NVKM_FALCON_MMIO_TRAP
 	push $r13
@@ -215,8 +212,7 @@
 		bra z #intr_subintr_skip_fifo
 			nv_iord($r12, NV_PPWR_FIFO_INTR)
 			push $r12
-			mov $r14 (PROC_HOST & 0x0000ffff)
-			sethi $r14 (PROC_HOST & 0xffff0000)
+			imm32($r14, PROC_HOST)
 			mov $r13 KMSG_FIFO
 			call(send)
 			pop $r12
@@ -256,7 +252,7 @@
 
 	/* try not losing precision (multiply then divide) */
 	imm32($r13, HW_TICKS_PER_US)
-	call #mulu32_32_64
+	call(mulu32_32_64)
 
 	/* use an immeditate, it's ok because HW_TICKS_PER_US < 16 bits */
 	div $r12 $r12 1000
@@ -268,7 +264,7 @@
 	/* let's divide then multiply, too bad for the precision! */
 	div $r14 $r14 1000
 	imm32($r13, HW_TICKS_PER_US)
-	call #mulu32_32_64
+	call(mulu32_32_64)
 
 	/* this cannot overflow as long as HW_TICKS_PER_US < 1000 */
 
@@ -290,7 +286,7 @@
 
 	/* simply multiply $us by HW_TICKS_PER_US */
 	imm32($r13, HW_TICKS_PER_US)
-	call #mulu32_32_64
+	call(mulu32_32_64)
 	mov b32 $r14 $r12
 
 	/* check if there wasn't any overflow */
@@ -511,14 +507,12 @@
 #ifdef NVKM_FALCON_MMIO_UAS
 	// somehow allows the magic "access mmio via D[]" stuff that's
 	// used by the nv_rd32/nv_wr32 macros to work
-	mov $r1 0x0010
-	sethi $r1 NV_PPWR_UAS_CONFIG_ENABLE
+	imm32($r1, 0x10 | NV_PPWR_UAS_CONFIG_ENABLE)
 	nv_iowrs(NV_PPWR_UAS_CONFIG, $r1)
 #endif
 
 	// route all interrupts except user0/1 and pause to fuc
-	mov $r1 0x00e0
-	sethi $r1 0x00000000
+	imm32($r1, 0xe0)
 	nv_iowr(NV_PPWR_INTR_ROUTE, $r1)
 
 	// enable watchdog and subintr intrs
@@ -529,8 +523,8 @@
 	nv_iowr(NV_PPWR_INTR_EN_SET, $r1)
 
 	// enable interrupts globally
-	mov $r1 #intr
-	sethi $r1 0x00000000
+	imm32($r1, #intr)
+	and $r1 0xffff
 	mov $iv0 $r1
 	bset $flags ie0
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc
index 96fc984..3737bd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc
@@ -169,7 +169,7 @@
 */	.b32 0 /*
 */	.skip 64
 
-#if NV_PPWR_CHIPSET < GK208
+#if NVKM_PPWR_CHIPSET < GK208
 #define imm32(reg,val) /*
 */	movw reg  ((val) & 0x0000ffff) /*
 */	sethi reg ((val) & 0xffff0000)
@@ -252,12 +252,12 @@
 #endif
 
 #define st(size, addr, reg) /*
-*/	movw $r0 addr /*
+*/	imm32($r0, addr) /*
 */	st size D[$r0] reg /*
 */	clear b32 $r0
 
 #define ld(size, reg, addr) /*
-*/	movw $r0 addr /*
+*/	imm32($r0, addr) /*
 */	ld size reg D[$r0] /*
 */	clear b32 $r0
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc
index 0c3a71b..9e3f4e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc
@@ -48,8 +48,7 @@
 	nv_iord($r1, NV_PPWR_DSCRATCH(2))
 	add b32 $r1 1
 	nv_iowr(NV_PPWR_DSCRATCH(2), $r1)
-	mov $r14 -0x2700 /* 0xd900, envyas grrr! */
-	sethi $r14 0x134f0000
+	imm32($r14, 0x134fd900)
 	call(timer)
 	ret
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
new file mode 100644
index 0000000..b02b868
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -0,0 +1,3 @@
+nvkm-y += nvkm/subdev/secboot/base.o
+nvkm-y += nvkm/subdev/secboot/gm200.o
+nvkm-y += nvkm/subdev/secboot/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
new file mode 100644
index 0000000..520facf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+#include <subdev/timer.h>
+
+static const char *
+managed_falcons_names[] = {
+	[NVKM_SECBOOT_FALCON_PMU] = "PMU",
+	[NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
+	[NVKM_SECBOOT_FALCON_FECS] = "FECS",
+	[NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
+	[NVKM_SECBOOT_FALCON_END] = "<invalid>",
+};
+
+/*
+ * Helper falcon functions
+ */
+
+static int
+falcon_clear_halt_interrupt(struct nvkm_device *device, u32 base)
+{
+	int ret;
+
+	/* clear halt interrupt */
+	nvkm_mask(device, base + 0x004, 0x10, 0x10);
+	/* wait until halt interrupt is cleared */
+	ret = nvkm_wait_msec(device, 10, base + 0x008, 0x10, 0x0);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int
+falcon_wait_idle(struct nvkm_device *device, u32 base)
+{
+	int ret;
+
+	ret = nvkm_wait_msec(device, 10, base + 0x04c, 0xffff, 0x0);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int
+nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
+{
+	struct nvkm_device *device = sb->subdev.device;
+	int ret;
+
+	/* enable engine */
+	nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask);
+	nvkm_rd32(device, 0x200);
+	ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
+	if (ret < 0) {
+		nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+		nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
+		return ret;
+	}
+
+	ret = falcon_wait_idle(device, sb->base);
+	if (ret)
+		return ret;
+
+	/* enable IRQs */
+	nvkm_wr32(device, sb->base + 0x010, 0xff);
+	nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask);
+	nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
+
+	return 0;
+}
+
+static int
+nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
+{
+	struct nvkm_device *device = sb->subdev.device;
+
+	/* disable IRQs and wait for any previous code to complete */
+	nvkm_mask(device, 0x644, sb->irq_mask, 0x0);
+	nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
+	nvkm_wr32(device, sb->base + 0x014, 0xff);
+
+	falcon_wait_idle(device, sb->base);
+
+	/* disable engine */
+	nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+
+	return 0;
+}
+
+int
+nvkm_secboot_falcon_reset(struct nvkm_secboot *sb)
+{
+	int ret;
+
+	ret = nvkm_secboot_falcon_disable(sb);
+	if (ret)
+		return ret;
+
+	ret = nvkm_secboot_falcon_enable(sb);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * nvkm_secboot_falcon_run - run the falcon that will perform secure boot
+ *
+ * This function is to be called after all chip-specific preparations have
+ * been completed. It will start the falcon to perform secure boot, wait for
+ * it to halt, and report if an error occurred.
+ */
+int
+nvkm_secboot_falcon_run(struct nvkm_secboot *sb)
+{
+	struct nvkm_device *device = sb->subdev.device;
+	int ret;
+
+	/* Start falcon */
+	nvkm_wr32(device, sb->base + 0x100, 0x2);
+
+	/* Wait for falcon halt */
+	ret = nvkm_wait_msec(device, 100, sb->base + 0x100, 0x10, 0x10);
+	if (ret < 0)
+		return ret;
+
+	/* If mailbox register contains an error code, then ACR has failed */
+	ret = nvkm_rd32(device, sb->base + 0x040);
+	if (ret) {
+		nvkm_error(&sb->subdev, "ACR boot failed, ret 0x%08x", ret);
+		falcon_clear_halt_interrupt(device, sb->base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+/**
+ * nvkm_secboot_reset() - reset specified falcon
+ */
+int
+nvkm_secboot_reset(struct nvkm_secboot *sb, u32 falcon)
+{
+	/* Unmanaged falcon? */
+	if (!(BIT(falcon) & sb->func->managed_falcons)) {
+		nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
+		return -EINVAL;
+	}
+
+	return sb->func->reset(sb, falcon);
+}
+
+/**
+ * nvkm_secboot_start() - start specified falcon
+ */
+int
+nvkm_secboot_start(struct nvkm_secboot *sb, u32 falcon)
+{
+	/* Unmanaged falcon? */
+	if (!(BIT(falcon) & sb->func->managed_falcons)) {
+		nvkm_error(&sb->subdev, "cannot start unmanaged falcon!\n");
+		return -EINVAL;
+	}
+
+	return sb->func->start(sb, falcon);
+}
+
+/**
+ * nvkm_secboot_is_managed() - check whether a given falcon is securely managed
+ */
+bool
+nvkm_secboot_is_managed(struct nvkm_secboot *secboot,
+			enum nvkm_secboot_falcon fid)
+{
+	if (!secboot)
+		return false;
+
+	return secboot->func->managed_falcons & BIT(fid);
+}
+
+static int
+nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_secboot *sb = nvkm_secboot(subdev);
+	int ret = 0;
+
+	/* Call chip-specific init function */
+	if (sb->func->init)
+		ret = sb->func->init(sb);
+	if (ret) {
+		nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
+			   ret);
+		return ret;
+	}
+
+	/*
+	 * Build all blobs - the same blobs can be used to perform secure boot
+	 * multiple times
+	 */
+	if (sb->func->prepare_blobs)
+		ret = sb->func->prepare_blobs(sb);
+
+	return ret;
+}
+
+static int
+nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_secboot *sb = nvkm_secboot(subdev);
+	int ret = 0;
+
+	if (sb->func->fini)
+		ret = sb->func->fini(sb, suspend);
+
+	return ret;
+}
+
+static void *
+nvkm_secboot_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_secboot *sb = nvkm_secboot(subdev);
+	void *ret = NULL;
+
+	if (sb->func->dtor)
+		ret = sb->func->dtor(sb);
+
+	return ret;
+}
+
+static const struct nvkm_subdev_func
+nvkm_secboot = {
+	.oneinit = nvkm_secboot_oneinit,
+	.fini = nvkm_secboot_fini,
+	.dtor = nvkm_secboot_dtor,
+};
+
+int
+nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_secboot *sb)
+{
+	unsigned long fid;
+
+	nvkm_subdev_ctor(&nvkm_secboot, device, index, 0, &sb->subdev);
+	sb->func = func;
+
+	/* setup the performing falcon's base address and masks */
+	switch (func->boot_falcon) {
+	case NVKM_SECBOOT_FALCON_PMU:
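+		/* PMU falcon register base; the masks are applied to the
+		 * 0x200 engine-enable and 0x640/0x644 interrupt registers
+		 * (see nvkm_secboot_falcon_enable()) */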
+		sb->base = 0x10a000;
+		sb->irq_mask = 0x1000000;
+		sb->enable_mask = 0x2000;
+		break;
+	default:
+		nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
+		return -EINVAL;
+	}
+
+	nvkm_debug(&sb->subdev, "securely managed falcons:\n");
+	for_each_set_bit(fid, &sb->func->managed_falcons,
+			 NVKM_SECBOOT_FALCON_END)
+		nvkm_debug(&sb->subdev, "- %s\n", managed_falcons_names[fid]);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
new file mode 100644
index 0000000..cc100dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -0,0 +1,1489 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Secure boot is the process by which NVIDIA-signed firmware is loaded into
+ * some of the falcons of a GPU. For production devices this is the only way
+ * for the firmware to access useful (but sensitive) registers.
+ *
+ * A Falcon microprocessor supporting advanced security modes can run in one of
+ * three modes:
+ *
+ * - Non-secure (NS). In this mode, functionality is similar to Falcon
+ *   architectures before security modes were introduced (pre-Maxwell), but
+ *   capability is restricted. In particular, certain registers may be
+ *   inaccessible for reads and/or writes, and physical memory access may be
+ *   disabled (on certain Falcon instances). This is the only possible mode that
+ *   can be used if you don't have microcode cryptographically signed by NVIDIA.
+ *
+ * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
+ *   not possible to read or write any Falcon internal state or Falcon registers
+ *   from outside the Falcon (for example, from the host system). The only way
+ *   to enable this mode is by loading microcode that has been signed by NVIDIA.
+ *   (The loading process involves tagging the IMEM block as secure, writing the
+ *   signature into a Falcon register, and starting execution. The hardware will
+ *   validate the signature, and if valid, grant HS privileges.)
+ *
+ * - Light Secure (LS). In this mode, the microprocessor has more privileges
+ *   than NS but fewer than HS. Some of the microprocessor state is visible to
+ *   host software to ease debugging. The only way to enable this mode is by HS
+ *   microcode enabling LS mode. Some privileges available to HS mode are not
+ *   available here. LS mode is introduced in GM20x.
+ *
+ * Secure boot consists in temporarily switching a HS-capable falcon (typically
+ * PMU) into HS mode in order to validate the LS firmwares of managed falcons,
+ * load them, and switch managed falcons into LS mode. Once secure boot
+ * completes, no falcon remains in HS mode.
+ *
+ * Secure boot requires a write-protected memory region (WPR) which can only be
+ * written by the secure falcon. On dGPU, the driver sets up the WPR region in
+ * video memory. On Tegra, it is set up by the bootloader and its location and
+ * size written into memory controller registers.
+ *
+ * The secure boot process takes place as follows:
+ *
+ * 1) A LS blob is constructed that contains all the LS firmwares we want to
+ *    load, along with their signatures and bootloaders.
+ *
+ * 2) A HS blob (also called ACR) is created that contains the signed HS
+ *    firmware in charge of loading the LS firmwares into their respective
+ *    falcons.
+ *
+ * 3) The HS blob is loaded (via its own bootloader) and executed on the
+ *    HS-capable falcon. It authenticates itself, switches the secure falcon to
+ *    HS mode and sets up the WPR region around the LS blob (dGPU) or copies the
+ *    LS blob into the WPR region (Tegra).
+ *
+ * 4) The LS blob is now secure from all external tampering. The HS falcon
+ *    checks the signatures of the LS firmwares and, if valid, switches the
+ *    managed falcons to LS mode and makes them ready to run the LS firmware.
+ *
+ * 5) The managed falcons remain in LS mode and can be started.
+ *
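+ * In this driver, steps 1 and 2 are handled by the chip-specific
+ * prepare_blobs() hook (invoked from nvkm_secboot_oneinit()), while the
+ * remaining steps are triggered through nvkm_secboot_reset() and
+ * nvkm_secboot_start().
+ *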
+ */
+
+#include "priv.h"
+
+#include <core/gpuobj.h>
+#include <core/firmware.h>
+#include <subdev/fb.h>
+
+enum {
+	FALCON_DMAIDX_UCODE		= 0,
+	FALCON_DMAIDX_VIRT		= 1,
+	FALCON_DMAIDX_PHYS_VID		= 2,
+	FALCON_DMAIDX_PHYS_SYS_COH	= 3,
+	FALCON_DMAIDX_PHYS_SYS_NCOH	= 4,
+};
+
+/**
+ * struct fw_bin_header - header of firmware files
+ * @bin_magic:		always 0x3b1d14f0
+ * @bin_ver:		version of the bin format
+ * @bin_size:		entire image size including this header
+ * @header_offset:	offset of the firmware/bootloader header in the file
+ * @data_offset:	offset of the firmware/bootloader payload in the file
+ * @data_size:		size of the payload
+ *
+ * This header is located at the beginning of the HS firmware and HS bootloader
+ * files, to describe where the headers and data can be found.
+ */
+struct fw_bin_header {
+	u32 bin_magic;
+	u32 bin_ver;
+	u32 bin_size;
+	u32 header_offset;
+	u32 data_offset;
+	u32 data_size;
+};
+
+/**
+ * struct fw_bl_desc - firmware bootloader descriptor
+ * @start_tag:		starting tag of bootloader
+ * @dmem_load_off:	DMEM offset of flcn_bl_dmem_desc
+ * @code_off:		offset of code section
+ * @code_size:		size of code section
+ * @data_off:		offset of data section
+ * @data_size:		size of data section
+ *
+ * This structure is embedded in bootloader firmware files to describe the
+ * IMEM and DMEM layout expected by the bootloader.
+ */
+struct fw_bl_desc {
+	u32 start_tag;
+	u32 dmem_load_off;
+	u32 code_off;
+	u32 code_size;
+	u32 data_off;
+	u32 data_size;
+};
+
+
+/*
+ *
+ * LS blob structures
+ *
+ */
+
+/**
+ * struct lsf_ucode_desc - LS falcon signatures
+ * @prd_keys:		signature to use when the GPU is in production mode
+ * @dbg_keys:		signature to use when the GPU is in debug mode
+ * @b_prd_present:	whether the production key is present
+ * @b_dbg_present:	whether the debug key is present
+ * @falcon_id:		ID of the falcon the ucode applies to
+ *
+ * Directly loaded from a signature file.
+ */
+struct lsf_ucode_desc {
+	u8  prd_keys[2][16];
+	u8  dbg_keys[2][16];
+	u32 b_prd_present;
+	u32 b_dbg_present;
+	u32 falcon_id;
+};
+
+/**
+ * struct lsf_lsb_header - LS firmware header
+ * @signature:		signature to verify the firmware against
+ * @ucode_off:		offset of the ucode blob in the WPR region. The ucode
+ *                      blob contains the bootloader, code and data of the
+ *                      LS falcon
+ * @ucode_size:		size of the ucode blob, including bootloader
+ * @data_size:		size of the ucode blob data
+ * @bl_code_size:	size of the bootloader code
+ * @bl_imem_off:	offset in imem of the bootloader
+ * @bl_data_off:	offset of the bootloader data in WPR region
+ * @bl_data_size:	size of the bootloader data
+ * @app_code_off:	offset of the app code relative to ucode_off
+ * @app_code_size:	size of the app code
+ * @app_data_off:	offset of the app data relative to ucode_off
+ * @app_data_size:	size of the app data
+ * @flags:		flags for the secure bootloader
+ *
+ * This structure is written into the WPR region for each managed falcon. Each
+ * instance is referenced by the lsb_offset member of the corresponding
+ * lsf_wpr_header.
+ */
+struct lsf_lsb_header {
+	struct lsf_ucode_desc signature;
+	u32 ucode_off;
+	u32 ucode_size;
+	u32 data_size;
+	u32 bl_code_size;
+	u32 bl_imem_off;
+	u32 bl_data_off;
+	u32 bl_data_size;
+	u32 app_code_off;
+	u32 app_code_size;
+	u32 app_data_off;
+	u32 app_data_size;
+	u32 flags;
+#define LSF_FLAG_LOAD_CODE_AT_0		1
+#define LSF_FLAG_DMACTL_REQ_CTX		4
+#define LSF_FLAG_FORCE_PRIV_LOAD	8
+};
+
+/**
+ * struct lsf_wpr_header - LS blob WPR Header
+ * @falcon_id:		LS falcon ID
+ * @lsb_offset:		offset of the lsb_lsf_header in the WPR region
+ * @bootstrap_owner:	secure falcon responsible for bootstrapping the LS falcon
+ * @lazy_bootstrap:	skip bootstrapping by ACR
+ * @status:		bootstrapping status
+ *
+ * An array of these is written at the beginning of the WPR region, one for
+ * each managed falcon. The array is terminated by an instance which falcon_id
+ * is LSF_FALCON_ID_INVALID.
+ */
+struct lsf_wpr_header {
+	u32  falcon_id;
+	u32  lsb_offset;
+	u32  bootstrap_owner;
+	u32  lazy_bootstrap;
+	u32  status;
+#define LSF_IMAGE_STATUS_NONE				0
+#define LSF_IMAGE_STATUS_COPY				1
+#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
+#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
+#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
+#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
+#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
+};
+
+
+/**
+ * struct ls_ucode_img_desc - descriptor of firmware image
+ * @descriptor_size:		size of this descriptor
+ * @image_size:			size of the whole image
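+ * @tools_version:		version of the tools used to build the image
+ * @app_version:		version of the LS firmware
+ * @date:			build date string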
+ * @bootloader_start_offset:	start offset of the bootloader in ucode image
+ * @bootloader_size:		size of the bootloader
+ * @bootloader_imem_offset:	start offset of the bootloader in IMEM
+ * @bootloader_entry_point:	entry point of the bootloader in IMEM
+ * @app_start_offset:		start offset of the LS firmware
+ * @app_size:			size of the LS firmware's code and data
+ * @app_imem_offset:		offset of the app in IMEM
+ * @app_imem_entry:		entry point of the app in IMEM
+ * @app_dmem_offset:		offset of the data in DMEM
+ * @app_resident_code_offset:	offset of app code from app_start_offset
+ * @app_resident_code_size:	size of the code
+ * @app_resident_data_offset:	offset of data from app_start_offset
+ * @app_resident_data_size:	size of data
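+ * @nb_overlays:		number of overlays described in @load_ovl
+ * @load_ovl:			start offset and size of each overlay
+ * @compressed:		whether the image data is compressed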
+ *
+ * A firmware image contains the code, data, and bootloader of a given LS
+ * falcon in a single blob. This structure describes where everything is.
+ *
+ * This can be generated from a (bootloader, code, data) set if they have
+ * been loaded separately, or come directly from a file.
+ */
+struct ls_ucode_img_desc {
+	u32 descriptor_size;
+	u32 image_size;
+	u32 tools_version;
+	u32 app_version;
+	char date[64];
+	u32 bootloader_start_offset;
+	u32 bootloader_size;
+	u32 bootloader_imem_offset;
+	u32 bootloader_entry_point;
+	u32 app_start_offset;
+	u32 app_size;
+	u32 app_imem_offset;
+	u32 app_imem_entry;
+	u32 app_dmem_offset;
+	u32 app_resident_code_offset;
+	u32 app_resident_code_size;
+	u32 app_resident_data_offset;
+	u32 app_resident_data_size;
+	u32 nb_overlays;
+	struct { u32 start; u32 size; } load_ovl[64];
+	u32 compressed;
+};
+
+/**
+ * struct ls_ucode_img - temporary storage for loaded LS firmwares
+ * @node:		to link within lsf_ucode_mgr
+ * @falcon_id:		ID of the falcon this LS firmware is for
+ * @ucode_desc:		loaded or generated map of ucode_data
+ * @ucode_header:	header of the firmware
+ * @ucode_data:		firmware payload (code and data)
+ * @ucode_size:		size in bytes of data in ucode_data
+ * @wpr_header:		WPR header to be written to the LS blob
+ * @lsb_header:		LSB header to be written to the LS blob
+ *
+ * Preparing the WPR LS blob requires information about all the LS firmwares
+ * (size, etc) to be known. This structure contains all the data of one LS
+ * firmware.
+ */
+struct ls_ucode_img {
+	struct list_head node;
+	enum nvkm_secboot_falcon falcon_id;
+
+	struct ls_ucode_img_desc ucode_desc;
+	u32 *ucode_header;
+	u8 *ucode_data;
+	u32 ucode_size;
+
+	struct lsf_wpr_header wpr_header;
+	struct lsf_lsb_header lsb_header;
+};
+
+/**
+ * struct ls_ucode_mgr - manager for all LS falcon firmwares
+ * @count:	number of managed LS falcons
+ * @wpr_size:	size of the required WPR region in bytes
+ * @img_list:	linked list of lsf_ucode_img
+ */
+struct ls_ucode_mgr {
+	u16 count;
+	u32 wpr_size;
+	struct list_head img_list;
+};
+
+
+/*
+ *
+ * HS blob structures
+ *
+ */
+
+/**
+ * struct hsf_fw_header - HS firmware descriptor
+ * @sig_dbg_offset:	offset of the debug signature
+ * @sig_dbg_size:	size of the debug signature
+ * @sig_prod_offset:	offset of the production signature
+ * @sig_prod_size:	size of the production signature
+ * @patch_loc:		offset of the offset (sic) of where the signature is
+ * @patch_sig:		offset of the offset (sic) to add to sig_*_offset
+ * @hdr_offset:		offset of the load header (see struct hs_load_header)
+ * @hdr_size:		size of above header
+ *
+ * This structure is embedded in the HS firmware image at
+ * hs_bin_hdr.header_offset.
+ */
+struct hsf_fw_header {
+	u32 sig_dbg_offset;
+	u32 sig_dbg_size;
+	u32 sig_prod_offset;
+	u32 sig_prod_size;
+	u32 patch_loc;
+	u32 patch_sig;
+	u32 hdr_offset;
+	u32 hdr_size;
+};
+
+/**
+ * struct hsf_load_header - HS firmware load header
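+ * @non_sec_code_off:	offset of the non-secure code section
+ * @non_sec_code_size:	size of the non-secure code section
+ * @data_dma_base:	DMA base address of the data section
+ * @data_size:		size of the data section
+ * @num_apps:		number of entries in the @app array
+ * @app:		offset and size of each secure code section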
+ */
+struct hsf_load_header {
+	u32 non_sec_code_off;
+	u32 non_sec_code_size;
+	u32 data_dma_base;
+	u32 data_size;
+	u32 num_apps;
+	struct {
+		u32 sec_code_off;
+		u32 sec_code_size;
+	} app[0];
+};
+
+/**
+ * Convenience function to duplicate a firmware file in memory and check that
+ * it has the required minimum size.
+ */
+static void *
+gm200_secboot_load_firmware(struct nvkm_subdev *subdev, const char *name,
+		    size_t min_size)
+{
+	const struct firmware *fw;
+	void *blob;
+	int ret;
+
+	ret = nvkm_firmware_get(subdev->device, name, &fw);
+	if (ret)
+		return ERR_PTR(ret);
+	if (fw->size < min_size) {
+		nvkm_error(subdev, "%s is smaller than expected size %zu\n",
+			   name, min_size);
+		nvkm_firmware_put(fw);
+		return ERR_PTR(-EINVAL);
+	}
+	blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
+	nvkm_firmware_put(fw);
+	if (!blob)
+		return ERR_PTR(-ENOMEM);
+
+	return blob;
+}
+
+
+/*
+ * Low-secure blob creation
+ */
+
+#define BL_DESC_BLK_SIZE 256
+/**
+ * Build a ucode image and descriptor from provided bootloader, code and data.
+ *
+ * @bl:		bootloader image, including 16-byte descriptor
+ * @code:	LS firmware code segment
+ * @data:	LS firmware data segment
+ * @desc:	ucode descriptor to be written
+ *
+ * Return: allocated ucode image with corresponding descriptor information. desc
+ *         is also updated to contain the right offsets within the returned image.
+ */
+static void *
+ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
+		   const struct firmware *data, struct ls_ucode_img_desc *desc)
+{
+	struct fw_bin_header *bin_hdr = (void *)bl->data;
+	struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
+	void *bl_data = (void *)bl->data + bin_hdr->data_offset;
+	u32 pos = 0;
+	void *image;
+
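+	/* image layout: bootloader first, then app code and data, each
+	 * segment aligned to BL_DESC_BLK_SIZE */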
+	desc->bootloader_start_offset = pos;
+	desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
+	desc->bootloader_imem_offset = bl_desc->start_tag * 256;
+	desc->bootloader_entry_point = bl_desc->start_tag * 256;
+
+	pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
+	desc->app_start_offset = pos;
+	desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
+			 ALIGN(data->size, BL_DESC_BLK_SIZE);
+	desc->app_imem_offset = 0;
+	desc->app_imem_entry = 0;
+	desc->app_dmem_offset = 0;
+	desc->app_resident_code_offset = 0;
+	desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
+
+	pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
+	desc->app_resident_data_offset = pos - desc->app_start_offset;
+	desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
+
+	desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
+			   desc->app_size;
+
+	image = kzalloc(desc->image_size, GFP_KERNEL);
+	if (!image)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(image + desc->bootloader_start_offset, bl_data,
+	       bl_desc->code_size);
+	memcpy(image + desc->app_start_offset, code->data, code->size);
+	memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
+	       data->data, data->size);
+
+	return image;
+}
+
+/**
+ * ls_ucode_img_load_generic() - load and prepare a LS ucode image
+ *
+ * Load the LS microcode, bootloader and signature and pack them into a single
+ * blob. Also generate the corresponding ucode descriptor.
+ */
+static int
+ls_ucode_img_load_generic(struct nvkm_subdev *subdev,
+			  struct ls_ucode_img *img, const char *falcon_name,
+			  const u32 falcon_id)
+{
+	const struct firmware *bl, *code, *data;
+	struct lsf_ucode_desc *lsf_desc;
+	char f[64];
+	int ret;
+
+	img->ucode_header = NULL;
+
+	snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
+	ret = nvkm_firmware_get(subdev->device, f, &bl);
+	if (ret)
+		goto error;
+
+	snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
+	ret = nvkm_firmware_get(subdev->device, f, &code);
+	if (ret)
+		goto free_bl;
+
+	snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
+	ret = nvkm_firmware_get(subdev->device, f, &data);
+	if (ret)
+		goto free_inst;
+
+	img->ucode_data = ls_ucode_img_build(bl, code, data,
+					     &img->ucode_desc);
+	if (IS_ERR(img->ucode_data)) {
+		ret = PTR_ERR(img->ucode_data);
+		goto free_data;
+	}
+	img->ucode_size = img->ucode_desc.image_size;
+
+	snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
+	lsf_desc = gm200_secboot_load_firmware(subdev, f, sizeof(*lsf_desc));
+	if (IS_ERR(lsf_desc)) {
+		ret = PTR_ERR(lsf_desc);
+		goto free_image;
+	}
+	/* not needed? the signature should already have the right value */
+	lsf_desc->falcon_id = falcon_id;
+	memcpy(&img->lsb_header.signature, lsf_desc, sizeof(*lsf_desc));
+	img->falcon_id = lsf_desc->falcon_id;
+	kfree(lsf_desc);
+
+	/* success path - only free requested firmware files */
+	goto free_data;
+
+free_image:
+	kfree(img->ucode_data);
+free_data:
+	nvkm_firmware_put(data);
+free_inst:
+	nvkm_firmware_put(code);
+free_bl:
+	nvkm_firmware_put(bl);
+error:
+	return ret;
+}
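+
+/*
+ * For the FECS falcon, for instance, this loads gr/fecs_bl, gr/fecs_inst,
+ * gr/fecs_data and gr/fecs_sig, which the firmware loader resolves under
+ * nvidia/<chip>/ (see the MODULE_FIRMWARE() declarations at the end of
+ * this file).
+ */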
+
+typedef int (*lsf_load_func)(struct nvkm_subdev *, struct ls_ucode_img *);
+
+static int
+ls_ucode_img_load_fecs(struct nvkm_subdev *subdev, struct ls_ucode_img *img)
+{
+	return ls_ucode_img_load_generic(subdev, img, "fecs",
+					 NVKM_SECBOOT_FALCON_FECS);
+}
+
+static int
+ls_ucode_img_load_gpccs(struct nvkm_subdev *subdev, struct ls_ucode_img *img)
+{
+	return ls_ucode_img_load_generic(subdev, img, "gpccs",
+					 NVKM_SECBOOT_FALCON_GPCCS);
+}
+
+/**
+ * ls_ucode_img_load() - create an ls_ucode_img and load it
+ */
+static struct ls_ucode_img *
+ls_ucode_img_load(struct nvkm_subdev *subdev, lsf_load_func load_func)
+{
+	struct ls_ucode_img *img;
+	int ret;
+
+	img = kzalloc(sizeof(*img), GFP_KERNEL);
+	if (!img)
+		return ERR_PTR(-ENOMEM);
+
+	ret = load_func(subdev, img);
+	if (ret) {
+		kfree(img);
+		return ERR_PTR(ret);
+	}
+
+	return img;
+}
+
+static const lsf_load_func lsf_load_funcs[] = {
+	[NVKM_SECBOOT_FALCON_END] = NULL, /* reserve enough space */
+	[NVKM_SECBOOT_FALCON_FECS] = ls_ucode_img_load_fecs,
+	[NVKM_SECBOOT_FALCON_GPCCS] = ls_ucode_img_load_gpccs,
+};
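+
+/*
+ * The [NVKM_SECBOOT_FALCON_END] = NULL entry above only sizes the array so
+ * that indexing it with any valid falcon ID stays within bounds, including
+ * falcons for which no loader exists yet.
+ */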
+
+/**
+ * ls_ucode_img_populate_bl_desc() - populate a DMEM BL descriptor for LS image
+ * @img:	ucode image to generate the descriptor from
+ * @wpr_addr:	base address of the WPR region the image resides in
+ * @desc:	descriptor to populate
+ *
+ * Populate the DMEM BL descriptor with the information contained in a
+ * ls_ucode_desc.
+ *
+ */
+static void
+ls_ucode_img_populate_bl_desc(struct ls_ucode_img *img, u64 wpr_addr,
+			      struct gm200_flcn_bl_desc *desc)
+{
+	struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+	u64 addr_base;
+
+	addr_base = wpr_addr + img->lsb_header.ucode_off +
+		    pdesc->app_start_offset;
+
+	memset(desc, 0, sizeof(*desc));
+	desc->ctx_dma = FALCON_DMAIDX_UCODE;
+	desc->code_dma_base.lo = lower_32_bits(
+		(addr_base + pdesc->app_resident_code_offset));
+	desc->code_dma_base.hi = upper_32_bits(
+		(addr_base + pdesc->app_resident_code_offset));
+	desc->non_sec_code_size = pdesc->app_resident_code_size;
+	desc->data_dma_base.lo = lower_32_bits(
+		(addr_base + pdesc->app_resident_data_offset));
+	desc->data_dma_base.hi = upper_32_bits(
+		(addr_base + pdesc->app_resident_data_offset));
+	desc->data_size = pdesc->app_resident_data_size;
+	desc->code_entry_point = pdesc->app_imem_entry;
+}
+
+#define LSF_LSB_HEADER_ALIGN 256
+#define LSF_BL_DATA_ALIGN 256
+#define LSF_BL_DATA_SIZE_ALIGN 256
+#define LSF_BL_CODE_SIZE_ALIGN 256
+#define LSF_UCODE_DATA_ALIGN 4096
+
+/**
+ * ls_ucode_img_fill_headers - fill the WPR and LSB headers of an image
+ * @gsb:	secure boot device used
+ * @img:	image to generate for
+ * @offset:	offset in the WPR region where this image starts
+ *
+ * Allocate space in the WPR area from offset and write the WPR and LSB headers
+ * accordingly.
+ *
+ * Return: offset at the end of this image.
+ */
+static u32
+ls_ucode_img_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_img *img,
+			  u32 offset)
+{
+	struct lsf_wpr_header *whdr = &img->wpr_header;
+	struct lsf_lsb_header *lhdr = &img->lsb_header;
+	struct ls_ucode_img_desc *desc = &img->ucode_desc;
+
+	if (img->ucode_header) {
+		nvkm_fatal(&gsb->base.subdev,
+			    "images withough loader are not supported yet!\n");
+		return offset;
+	}
+
+	/* Fill WPR header */
+	whdr->falcon_id = img->falcon_id;
+	whdr->bootstrap_owner = gsb->base.func->boot_falcon;
+	whdr->status = LSF_IMAGE_STATUS_COPY;
+
+	/* Align, save off, and include the LSB header size */
+	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
+	whdr->lsb_offset = offset;
+	offset += sizeof(struct lsf_lsb_header);
+
+	/*
+	 * Align, save off, and include the original (static) ucode
+	 * image size
+	 */
+	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
+	lhdr->ucode_off = offset;
+	offset += img->ucode_size;
+
+	/*
+	 * For falcons that use a boot loader (BL), we append a loader
+	 * desc structure on the end of the ucode image and consider
+	 * this the boot loader data. The host will then copy the loader
+	 * desc args to this space within the WPR region (before locking
+	 * down) and the HS bin will then copy them to DMEM 0 for the
+	 * loader.
+	 */
+	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
+				   LSF_BL_CODE_SIZE_ALIGN);
+	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
+				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
+	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
+				lhdr->bl_code_size - lhdr->ucode_size;
+	/*
+	 * Though the BL is located at offset 0 of the image, its VA
+	 * is different to make sure that it doesn't collide with the
+	 * actual OS VA range
+	 */
+	lhdr->bl_imem_off = desc->bootloader_imem_offset;
+	lhdr->app_code_off = desc->app_start_offset +
+			     desc->app_resident_code_offset;
+	lhdr->app_code_size = desc->app_resident_code_size;
+	lhdr->app_data_off = desc->app_start_offset +
+			     desc->app_resident_data_offset;
+	lhdr->app_data_size = desc->app_resident_data_size;
+
+	lhdr->flags = 0;
+	if (img->falcon_id == gsb->base.func->boot_falcon)
+		lhdr->flags = LSF_FLAG_DMACTL_REQ_CTX;
+
+	/* GPCCS will be loaded using PRI */
+	if (img->falcon_id == NVKM_SECBOOT_FALCON_GPCCS)
+		lhdr->flags |= LSF_FLAG_FORCE_PRIV_LOAD;
+
+	/* Align (size bloat) and save off BL descriptor size */
+	lhdr->bl_data_size = ALIGN(sizeof(struct gm200_flcn_bl_desc),
+				   LSF_BL_DATA_SIZE_ALIGN);
+	/*
+	 * Align, save off, and include the additional BL data
+	 */
+	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
+	lhdr->bl_data_off = offset;
+	offset += lhdr->bl_data_size;
+
+	return offset;
+}
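+
+/*
+ * Illustrative walk-through of the offset computation above (hypothetical
+ * sizes; assume sizeof(struct lsf_lsb_header) == 0x128 and a 0x1500-byte
+ * ucode image):
+ *
+ *	offset in   = 0x30
+ *	lsb_offset  = ALIGN(0x30, 0x100)            = 0x100
+ *	ucode_off   = ALIGN(0x100 + 0x128, 0x1000)  = 0x1000
+ *	bl_data_off = ALIGN(0x1000 + 0x1500, 0x100) = 0x2500
+ *	offset out  = 0x2500 + bl_data_size
+ */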
+
+static void
+ls_ucode_mgr_init(struct ls_ucode_mgr *mgr)
+{
+	memset(mgr, 0, sizeof(*mgr));
+	INIT_LIST_HEAD(&mgr->img_list);
+}
+
+static void
+ls_ucode_mgr_cleanup(struct ls_ucode_mgr *mgr)
+{
+	struct ls_ucode_img *img, *t;
+
+	list_for_each_entry_safe(img, t, &mgr->img_list, node) {
+		kfree(img->ucode_data);
+		kfree(img->ucode_header);
+		kfree(img);
+	}
+}
+
+static void
+ls_ucode_mgr_add_img(struct ls_ucode_mgr *mgr, struct ls_ucode_img *img)
+{
+	mgr->count++;
+	list_add_tail(&img->node, &mgr->img_list);
+}
+
+/**
+ * ls_ucode_mgr_fill_headers - fill WPR and LSB headers of all managed images
+ */
+static void
+ls_ucode_mgr_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr)
+{
+	struct ls_ucode_img *img;
+	u32 offset;
+
+	/*
+	 * Start with an array of WPR headers at the base of the WPR.
+	 * The expectation here is that the secure falcon will do a single DMA
+	 * read of this array and cache it internally so it's ok to pack these.
+	 * Also, we add 1 to the falcon count to indicate the end of the array.
+	 */
+	offset = sizeof(struct lsf_wpr_header) * (mgr->count + 1);
+
+	/*
+	 * Walk the managed falcons, accounting for the LSB structs
+	 * as well as the ucode images.
+	 */
+	list_for_each_entry(img, &mgr->img_list, node) {
+		offset = ls_ucode_img_fill_headers(gsb, img, offset);
+	}
+
+	mgr->wpr_size = offset;
+}
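+
+/*
+ * Sketch of the resulting WPR layout for two managed falcons (offsets are
+ * whatever ls_ucode_img_fill_headers() computed):
+ *
+ *	+----------------------+ 0
+ *	| WPR header (FECS)    |
+ *	| WPR header (GPCCS)   |
+ *	| terminator entry     |
+ *	+----------------------+ lsb_offset (256B-aligned)
+ *	| LSB header (FECS)    |
+ *	| FECS ucode + BL data |
+ *	+----------------------+
+ *	| LSB header (GPCCS)   |
+ *	| GPCCS ucode + BL data|
+ *	+----------------------+ mgr->wpr_size
+ */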
+
+/**
+ * ls_ucode_mgr_write_wpr - write the WPR blob contents
+ */
+static int
+ls_ucode_mgr_write_wpr(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr,
+		       struct nvkm_gpuobj *wpr_blob)
+{
+	struct ls_ucode_img *img;
+	u32 pos = 0;
+
+	nvkm_kmap(wpr_blob);
+
+	list_for_each_entry(img, &mgr->img_list, node) {
+		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
+				      sizeof(img->wpr_header));
+
+		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
+				     &img->lsb_header, sizeof(img->lsb_header));
+
+		/* Generate and write BL descriptor */
+		if (!img->ucode_header) {
+			u8 desc[gsb->func->bl_desc_size];
+			struct gm200_flcn_bl_desc gdesc;
+
+			ls_ucode_img_populate_bl_desc(img, gsb->wpr_addr,
+						      &gdesc);
+			gsb->func->fixup_bl_desc(&gdesc, &desc);
+			nvkm_gpuobj_memcpy_to(wpr_blob,
+					      img->lsb_header.bl_data_off,
+					      &desc, gsb->func->bl_desc_size);
+		}
+
+		/* Copy ucode */
+		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
+				      img->ucode_data, img->ucode_size);
+
+		pos += sizeof(img->wpr_header);
+	}
+
+	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
+
+	nvkm_done(wpr_blob);
+
+	return 0;
+}
+
+/* Both size and address of WPR need to be 128K-aligned */
+#define WPR_ALIGNMENT	0x20000
+/**
+ * gm200_secboot_prepare_ls_blob() - prepare the LS blob
+ *
+ * For each securely managed falcon, load the FW, signatures and bootloaders and
+ * prepare a ucode blob. Then, compute the offsets in the WPR region for each
+ * blob, and finally write the headers and ucode blobs into a GPU object that
+ * will be copied into the WPR region by the HS firmware.
+ */
+static int
+gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
+{
+	struct nvkm_secboot *sb = &gsb->base;
+	struct nvkm_device *device = sb->subdev.device;
+	struct ls_ucode_mgr mgr;
+	int falcon_id;
+	int ret;
+
+	ls_ucode_mgr_init(&mgr);
+
+	/* Load all LS blobs */
+	for_each_set_bit(falcon_id, &gsb->base.func->managed_falcons,
+			 NVKM_SECBOOT_FALCON_END) {
+		struct ls_ucode_img *img;
+
+		img = ls_ucode_img_load(&sb->subdev, lsf_load_funcs[falcon_id]);
+
+		if (IS_ERR(img)) {
+			ret = PTR_ERR(img);
+			goto cleanup;
+		}
+		ls_ucode_mgr_add_img(&mgr, img);
+	}
+
+	/*
+	 * Fill the WPR and LSF headers with the right offsets and compute
+	 * required WPR size
+	 */
+	ls_ucode_mgr_fill_headers(gsb, &mgr);
+	mgr.wpr_size = ALIGN(mgr.wpr_size, WPR_ALIGNMENT);
+
+	/* Allocate GPU object that will contain the WPR region */
+	ret = nvkm_gpuobj_new(device, mgr.wpr_size, WPR_ALIGNMENT, false, NULL,
+			      &gsb->ls_blob);
+	if (ret)
+		goto cleanup;
+
+	nvkm_debug(&sb->subdev, "%d managed LS falcons, WPR size is %u bytes\n",
+		    mgr.count, mgr.wpr_size);
+
+	/* If WPR address and size are not fixed, set them to fit the LS blob */
+	if (!gsb->wpr_size) {
+		gsb->wpr_addr = gsb->ls_blob->addr;
+		gsb->wpr_size = gsb->ls_blob->size;
+	}
+
+	/* Write LS blob */
+	ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
+
+cleanup:
+	ls_ucode_mgr_cleanup(&mgr);
+
+	return ret;
+}
+
+/*
+ * High-secure blob creation
+ */
+
+/**
+ * gm200_secboot_hsf_patch_signature() - patch HS blob with correct signature
+ */
+static void
+gm200_secboot_hsf_patch_signature(struct gm200_secboot *gsb, void *acr_image)
+{
+	struct nvkm_secboot *sb = &gsb->base;
+	struct fw_bin_header *hsbin_hdr = acr_image;
+	struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
+	void *hs_data = acr_image + hsbin_hdr->data_offset;
+	void *sig;
+	u32 sig_size;
+
+	/* Falcon in debug or production mode? */
+	if ((nvkm_rd32(sb->subdev.device, sb->base + 0xc08) >> 20) & 0x1) {
+		sig = acr_image + fw_hdr->sig_dbg_offset;
+		sig_size = fw_hdr->sig_dbg_size;
+	} else {
+		sig = acr_image + fw_hdr->sig_prod_offset;
+		sig_size = fw_hdr->sig_prod_size;
+	}
+
+	/* Patch signature */
+	memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
+}
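+
+/*
+ * Note: the HS image ships with both a debug and a production signature;
+ * the register read above tells whether the falcon runs in debug or
+ * production mode, and the matching signature is copied into the image
+ * at hs_data + patch_loc.
+ */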
+
+/**
+ * gm200_secboot_populate_hsf_bl_desc() - populate BL descriptor for HS image
+ */
+static void
+gm200_secboot_populate_hsf_bl_desc(void *acr_image,
+				   struct gm200_flcn_bl_desc *bl_desc)
+{
+	struct fw_bin_header *hsbin_hdr = acr_image;
+	struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
+	struct hsf_load_header *load_hdr = acr_image + fw_hdr->hdr_offset;
+
+	/*
+	 * Descriptor for the bootloader that will load the ACR image into
+	 * IMEM/DMEM memory.
+	 */
+	memset(bl_desc, 0, sizeof(*bl_desc));
+	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
+	bl_desc->non_sec_code_off = load_hdr->non_sec_code_off;
+	bl_desc->non_sec_code_size = load_hdr->non_sec_code_size;
+	bl_desc->sec_code_off = load_hdr->app[0].sec_code_off;
+	bl_desc->sec_code_size = load_hdr->app[0].sec_code_size;
+	bl_desc->code_entry_point = 0;
+	/*
+	 * We need to set code_dma_base to the virtual address of the acr_blob,
+	 * and add this address to data_dma_base before writing it into DMEM
+	 */
+	bl_desc->code_dma_base.lo = 0;
+	bl_desc->data_dma_base.lo = load_hdr->data_dma_base;
+	bl_desc->data_size = load_hdr->data_size;
+}
+
+/**
+ * gm200_secboot_prepare_hs_blob - load and prepare a HS blob and BL descriptor
+ *
+ * @gsb:	secure boot instance to prepare for
+ * @fw:		name of the HS firmware to load
+ * @blob:	pointer to gpuobj that will be allocated to receive the HS FW payload
+ * @bl_desc:	pointer to the BL descriptor to write for this firmware
+ * @patch:	whether we should patch the HS descriptor (only for HS loaders)
+ */
+static int
+gm200_secboot_prepare_hs_blob(struct gm200_secboot *gsb, const char *fw,
+			      struct nvkm_gpuobj **blob,
+			      struct gm200_flcn_bl_desc *bl_desc, bool patch)
+{
+	struct nvkm_subdev *subdev = &gsb->base.subdev;
+	void *acr_image;
+	struct fw_bin_header *hsbin_hdr;
+	struct hsf_fw_header *fw_hdr;
+	void *acr_data;
+	struct hsf_load_header *load_hdr;
+	struct hsflcn_acr_desc *desc;
+	int ret;
+
+	acr_image = gm200_secboot_load_firmware(subdev, fw, 0);
+	if (IS_ERR(acr_image))
+		return PTR_ERR(acr_image);
+	hsbin_hdr = acr_image;
+
+	/* Patch signature */
+	gm200_secboot_hsf_patch_signature(gsb, acr_image);
+
+	acr_data = acr_image + hsbin_hdr->data_offset;
+
+	/* Patch descriptor? */
+	if (patch) {
+		fw_hdr = acr_image + hsbin_hdr->header_offset;
+		load_hdr = acr_image + fw_hdr->hdr_offset;
+		desc = acr_data + load_hdr->data_dma_base;
+		gsb->func->fixup_hs_desc(gsb, desc);
+	}
+
+	/* Generate HS BL descriptor */
+	gm200_secboot_populate_hsf_bl_desc(acr_image, bl_desc);
+
+	/* Create ACR blob and copy HS data to it */
+	ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
+			      0x1000, false, NULL, blob);
+	if (ret)
+		goto cleanup;
+
+	nvkm_kmap(*blob);
+	nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
+	nvkm_done(*blob);
+
+cleanup:
+	kfree(acr_image);
+
+	return ret;
+}
+
+/*
+ * High-secure bootloader blob creation
+ */
+
+static int
+gm200_secboot_prepare_hsbl_blob(struct gm200_secboot *gsb)
+{
+	struct nvkm_subdev *subdev = &gsb->base.subdev;
+
+	gsb->hsbl_blob = gm200_secboot_load_firmware(subdev, "acr/bl", 0);
+	if (IS_ERR(gsb->hsbl_blob)) {
+		int ret = PTR_ERR(gsb->hsbl_blob);
+
+		gsb->hsbl_blob = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * gm20x_secboot_prepare_blobs - load blobs common to all GM20X GPUs.
+ *
+ * This includes the LS blob, HS ucode loading blob, and HS bootloader.
+ *
+ * The HS ucode unload blob is only used on dGPU.
+ */
+int
+gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
+{
+	int ret;
+
+	/* Load and prepare the managed falcon's firmwares */
+	ret = gm200_secboot_prepare_ls_blob(gsb);
+	if (ret)
+		return ret;
+
+	/* Load the HS firmware that will load the LS firmwares */
+	ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
+					    &gsb->acr_load_blob,
+					    &gsb->acr_load_bl_desc, true);
+	if (ret)
+		return ret;
+
+	/* Load the HS firmware bootloader */
+	ret = gm200_secboot_prepare_hsbl_blob(gsb);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
+{
+	struct gm200_secboot *gsb = gm200_secboot(sb);
+	int ret;
+
+	ret = gm20x_secboot_prepare_blobs(gsb);
+	if (ret)
+		return ret;
+
+	/* dGPU only: load the HS firmware that unprotects the WPR region */
+	ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
+					    &gsb->acr_unload_blob,
+					    &gsb->acr_unload_bl_desc, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+
+
+/*
+ * Secure Boot Execution
+ */
+
+/**
+ * gm200_secboot_load_hs_bl() - load HS bootloader into DMEM and IMEM
+ */
+static void
+gm200_secboot_load_hs_bl(struct gm200_secboot *gsb, void *data, u32 data_size)
+{
+	struct nvkm_device *device = gsb->base.subdev.device;
+	struct fw_bin_header *hdr = gsb->hsbl_blob;
+	struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset;
+	void *blob_data = gsb->hsbl_blob + hdr->data_offset;
+	void *hsbl_code = blob_data + hsbl_desc->code_off;
+	void *hsbl_data = blob_data + hsbl_desc->data_off;
+	u32 code_size = ALIGN(hsbl_desc->code_size, 256);
+	const u32 base = gsb->base.base;
+	u32 blk;
+	u32 tag;
+	int i;
+
+	/*
+	 * Copy HS bootloader data
+	 */
+	nvkm_wr32(device, base + 0x1c0, (0x00000000 | (0x1 << 24)));
+	for (i = 0; i < hsbl_desc->data_size / 4; i++)
+		nvkm_wr32(device, base + 0x1c4, ((u32 *)hsbl_data)[i]);
+
+	/*
+	 * Copy HS bootloader interface structure where the HS descriptor
+	 * expects it to be
+	 */
+	nvkm_wr32(device, base + 0x1c0,
+		  (hsbl_desc->dmem_load_off | (0x1 << 24)));
+	for (i = 0; i < data_size / 4; i++)
+		nvkm_wr32(device, base + 0x1c4, ((u32 *)data)[i]);
+
+	/* Copy HS bootloader code to end of IMEM */
+	blk = (nvkm_rd32(device, base + 0x108) & 0x1ff) - (code_size >> 8);
+	tag = hsbl_desc->start_tag;
+	nvkm_wr32(device, base + 0x180, ((blk & 0xff) << 8) | (0x1 << 24));
+	for (i = 0; i < code_size / 4; i++) {
+		/* write new tag every 256B */
+		if ((i & 0x3f) == 0) {
+			nvkm_wr32(device, base + 0x188, tag & 0xffff);
+			tag++;
+		}
+		nvkm_wr32(device, base + 0x184, ((u32 *)hsbl_code)[i]);
+	}
+	nvkm_wr32(device, base + 0x188, 0);
+}
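+
+/*
+ * Worked example for the IMEM copy above (hypothetical values): with 64
+ * 256B blocks of IMEM (reg 0x108 & 0x1ff == 0x40) and code_size == 0x800,
+ * blk = 0x40 - (0x800 >> 8) = 0x38, i.e. the bootloader lands in the last
+ * eight blocks of IMEM, tagged start_tag through start_tag + 7.
+ */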
+
+/**
+ * gm200_secboot_setup_falcon() - set up the secure falcon for secure boot
+ */
+static int
+gm200_secboot_setup_falcon(struct gm200_secboot *gsb)
+{
+	struct nvkm_device *device = gsb->base.subdev.device;
+	struct fw_bin_header *hdr = gsb->hsbl_blob;
+	struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset;
+	/* virtual start address for boot vector */
+	u32 virt_addr = hsbl_desc->start_tag << 8;
+	const u32 base = gsb->base.base;
+	const u32 reg_base = base + 0xe00;
+	u32 inst_loc;
+	int ret;
+
+	ret = nvkm_secboot_falcon_reset(&gsb->base);
+	if (ret)
+		return ret;
+
+	/* setup apertures - virtual */
+	nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_UCODE), 0x4);
+	nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_VIRT), 0x0);
+	/* setup apertures - physical */
+	nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_VID), 0x4);
+	nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_COH),
+		  0x4 | 0x1);
+	nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_NCOH),
+		  0x4 | 0x2);
+
+	/* Set context */
+	if (nvkm_memory_target(gsb->inst->memory) == NVKM_MEM_TARGET_VRAM)
+		inst_loc = 0x0; /* FB */
+	else
+		inst_loc = 0x3; /* Non-coherent sysmem */
+
+	nvkm_mask(device, base + 0x048, 0x1, 0x1);
+	nvkm_wr32(device, base + 0x480,
+		  ((gsb->inst->addr >> 12) & 0xfffffff) |
+		  (inst_loc << 28) | (1 << 30));
+
+	/* Set boot vector to code's starting virtual address */
+	nvkm_wr32(device, base + 0x104, virt_addr);
+
+	return 0;
+}
+
+/**
+ * gm200_secboot_run_hs_blob() - run the given high-secure blob
+ */
+static int
+gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob,
+			  struct gm200_flcn_bl_desc *desc)
+{
+	struct nvkm_vma vma;
+	u64 vma_addr;
+	const u32 bl_desc_size = gsb->func->bl_desc_size;
+	u8 bl_desc[bl_desc_size];
+	int ret;
+
+	/* Map the HS firmware so the HS bootloader can see it */
+	ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
+	if (ret)
+		return ret;
+
+	/* Add the mapping address to the DMA bases */
+	vma_addr = flcn64_to_u64(desc->code_dma_base) + vma.offset;
+	desc->code_dma_base.lo = lower_32_bits(vma_addr);
+	desc->code_dma_base.hi = upper_32_bits(vma_addr);
+	vma_addr = flcn64_to_u64(desc->data_dma_base) + vma.offset;
+	desc->data_dma_base.lo = lower_32_bits(vma_addr);
+	desc->data_dma_base.hi = upper_32_bits(vma_addr);
+
+	/* Fixup the BL header */
+	gsb->func->fixup_bl_desc(desc, &bl_desc);
+
+	/* Reset the falcon and make it ready to run the HS bootloader */
+	ret = gm200_secboot_setup_falcon(gsb);
+	if (ret)
+		goto done;
+
+	/* Load the HS bootloader into the falcon's IMEM/DMEM */
+	gm200_secboot_load_hs_bl(gsb, &bl_desc, bl_desc_size);
+
+	/* Start the HS bootloader */
+	ret = nvkm_secboot_falcon_run(&gsb->base);
+
+done:
+	/* Restore the original DMA addresses */
+	vma_addr = flcn64_to_u64(desc->code_dma_base) - vma.offset;
+	desc->code_dma_base.lo = lower_32_bits(vma_addr);
+	desc->code_dma_base.hi = upper_32_bits(vma_addr);
+	vma_addr = flcn64_to_u64(desc->data_dma_base) - vma.offset;
+	desc->data_dma_base.lo = lower_32_bits(vma_addr);
+	desc->data_dma_base.hi = upper_32_bits(vma_addr);
+
+	/* We don't need the ACR firmware anymore */
+	nvkm_gpuobj_unmap(&vma);
+
+	return ret;
+}
+
+/*
+ * gm200_secboot_reset() - execute secure boot from the prepared state
+ *
+ * Load the HS bootloader and ask the falcon to run it. This will in turn
+ * load the HS firmware and run it, so once the falcon stops all the managed
+ * falcons should have their LS firmware loaded and be ready to run.
+ */
+int
+gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
+{
+	struct gm200_secboot *gsb = gm200_secboot(sb);
+	int ret;
+
+	/*
+	 * Dummy GM200 implementation: perform secure boot each time we are
+	 * called on FECS. Since only FECS and GPCCS are managed and started
+	 * together, this ought to be safe.
+	 *
+	 * Once we have proper PMU firmware and support, this will be changed
+	 * to a proper call to the PMU method.
+	 */
+	if (falcon != NVKM_SECBOOT_FALCON_FECS)
+		goto end;
+
+	/* If WPR is set and we have an unload blob, run it to unlock WPR */
+	if (gsb->acr_unload_blob &&
+	    gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) {
+		ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob,
+						&gsb->acr_unload_bl_desc);
+		if (ret)
+			return ret;
+	}
+
+	/* Reload all managed falcons */
+	ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_load_blob,
+					&gsb->acr_load_bl_desc);
+	if (ret)
+		return ret;
+
+end:
+	gsb->falcon_state[falcon] = RESET;
+	return 0;
+}
+
+int
+gm200_secboot_start(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
+{
+	struct gm200_secboot *gsb = gm200_secboot(sb);
+	int base;
+
+	switch (falcon) {
+	case NVKM_SECBOOT_FALCON_FECS:
+		base = 0x409000;
+		break;
+	case NVKM_SECBOOT_FALCON_GPCCS:
+		base = 0x41a000;
+		break;
+	default:
+		nvkm_error(&sb->subdev, "cannot start unhandled falcon!\n");
+		return -EINVAL;
+	}
+
+	nvkm_wr32(sb->subdev.device, base + 0x130, 0x00000002);
+	gsb->falcon_state[falcon] = RUNNING;
+
+	return 0;
+}
+
+
+
+int
+gm200_secboot_init(struct nvkm_secboot *sb)
+{
+	struct gm200_secboot *gsb = gm200_secboot(sb);
+	struct nvkm_device *device = sb->subdev.device;
+	struct nvkm_vm *vm;
+	const u64 vm_area_len = 600 * 1024;
+	int ret;
+
+	/* Allocate instance block and VM */
+	ret = nvkm_gpuobj_new(device, 0x1000, 0, true, NULL, &gsb->inst);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x8000, 0, true, NULL, &gsb->pgd);
+	if (ret)
+		return ret;
+
+	ret = nvkm_vm_new(device, 0, vm_area_len, 0, NULL, &vm);
+	if (ret)
+		return ret;
+
+	atomic_inc(&vm->engref[NVKM_SUBDEV_PMU]);
+
+	ret = nvkm_vm_ref(vm, &gsb->vm, gsb->pgd);
+	nvkm_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(gsb->inst);
+	nvkm_wo32(gsb->inst, 0x200, lower_32_bits(gsb->pgd->addr));
+	nvkm_wo32(gsb->inst, 0x204, upper_32_bits(gsb->pgd->addr));
+	nvkm_wo32(gsb->inst, 0x208, lower_32_bits(vm_area_len - 1));
+	nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
+	nvkm_done(gsb->inst);
+
+	return 0;
+}
+
+int
+gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
+{
+	struct gm200_secboot *gsb = gm200_secboot(sb);
+	int ret = 0;
+	int i;
+
+	/* Run the unload blob to unprotect the WPR region */
+	if (gsb->acr_unload_blob &&
+	    gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE)
+		ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob,
+						&gsb->acr_unload_bl_desc);
+
+	for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
+		gsb->falcon_state[i] = NON_SECURE;
+
+	return ret;
+}
+
+void *
+gm200_secboot_dtor(struct nvkm_secboot *sb)
+{
+	struct gm200_secboot *gsb = gm200_secboot(sb);
+
+	nvkm_gpuobj_del(&gsb->acr_unload_blob);
+
+	kfree(gsb->hsbl_blob);
+	nvkm_gpuobj_del(&gsb->acr_load_blob);
+	nvkm_gpuobj_del(&gsb->ls_blob);
+
+	nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
+	nvkm_gpuobj_del(&gsb->pgd);
+	nvkm_gpuobj_del(&gsb->inst);
+
+	return gsb;
+}
+
+
+static const struct nvkm_secboot_func
+gm200_secboot = {
+	.dtor = gm200_secboot_dtor,
+	.init = gm200_secboot_init,
+	.fini = gm200_secboot_fini,
+	.prepare_blobs = gm200_secboot_prepare_blobs,
+	.reset = gm200_secboot_reset,
+	.start = gm200_secboot_start,
+	.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
+			   BIT(NVKM_SECBOOT_FALCON_GPCCS),
+	.boot_falcon = NVKM_SECBOOT_FALCON_PMU,
+};
+
+/**
+ * gm200_secboot_fixup_bl_desc - copy the BL descriptor unchanged
+ *
+ * The GM200 descriptor format is the generic format, so no fixup is needed.
+ */
+static void
+gm200_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
+{
+	memcpy(ret, desc, sizeof(*desc));
+}
+
+static void
+gm200_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
+			    struct hsflcn_acr_desc *desc)
+{
+	desc->ucode_blob_base = gsb->ls_blob->addr;
+	desc->ucode_blob_size = gsb->ls_blob->size;
+
+	desc->wpr_offset = 0;
+
+	/* WPR region information for the HS binary to set up */
+	desc->wpr_region_id = 1;
+	desc->regions.no_regions = 1;
+	desc->regions.region_props[0].region_id = 1;
+	desc->regions.region_props[0].start_addr = gsb->wpr_addr >> 8;
+	desc->regions.region_props[0].end_addr =
+		(gsb->wpr_addr + gsb->wpr_size) >> 8;
+}
+
+static const struct gm200_secboot_func
+gm200_secboot_func = {
+	.bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
+	.fixup_bl_desc = gm200_secboot_fixup_bl_desc,
+	.fixup_hs_desc = gm200_secboot_fixup_hs_desc,
+};
+
+int
+gm200_secboot_new(struct nvkm_device *device, int index,
+		  struct nvkm_secboot **psb)
+{
+	int ret;
+	struct gm200_secboot *gsb;
+
+	gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
+	if (!gsb) {
+		*psb = NULL;
+		return -ENOMEM;
+	}
+	*psb = &gsb->base;
+
+	ret = nvkm_secboot_ctor(&gm200_secboot, device, index, &gsb->base);
+	if (ret)
+		return ret;
+
+	gsb->func = &gm200_secboot_func;
+
+	return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
new file mode 100644
index 0000000..6843204
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+#include <core/gpuobj.h>
+
+/*
+ * The BL header format used by GM20B's firmware is slightly different
+ * from the one of GM200. Fix the differences here.
+ */
+struct gm20b_flcn_bl_desc {
+	u32 reserved[4];
+	u32 signature[4];
+	u32 ctx_dma;
+	u32 code_dma_base;
+	u32 non_sec_code_off;
+	u32 non_sec_code_size;
+	u32 sec_code_off;
+	u32 sec_code_size;
+	u32 code_entry_point;
+	u32 data_dma_base;
+	u32 data_size;
+};
+
+/**
+ * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
+ *
+ * The only format difference to handle is that DMA addresses are 32 bits
+ * wide and expressed in 256-byte units.
+ */
+static void
+gm20b_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
+{
+	struct gm20b_flcn_bl_desc *gdesc = ret;
+	u64 addr;
+
+	memcpy(gdesc->reserved, desc->reserved, sizeof(gdesc->reserved));
+	memcpy(gdesc->signature, desc->signature, sizeof(gdesc->signature));
+	gdesc->ctx_dma = desc->ctx_dma;
+	addr = desc->code_dma_base.hi;
+	addr <<= 32;
+	addr |= desc->code_dma_base.lo;
+	gdesc->code_dma_base = lower_32_bits(addr >> 8);
+	gdesc->non_sec_code_off = desc->non_sec_code_off;
+	gdesc->non_sec_code_size = desc->non_sec_code_size;
+	gdesc->sec_code_off = desc->sec_code_off;
+	gdesc->sec_code_size = desc->sec_code_size;
+	gdesc->code_entry_point = desc->code_entry_point;
+	addr = desc->data_dma_base.hi;
+	addr <<= 32;
+	addr |= desc->data_dma_base.lo;
+	gdesc->data_dma_base = lower_32_bits(addr >> 8);
+	gdesc->data_size = desc->data_size;
+}
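+
+/*
+ * Example with a hypothetical address: a GM200 code_dma_base of
+ * 0x123456700 becomes 0x01234567 in the GM20B descriptor, i.e. the
+ * same address expressed in 256-byte units.
+ */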
+
+static void
+gm20b_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
+			    struct hsflcn_acr_desc *desc)
+{
+	desc->ucode_blob_base = gsb->ls_blob->addr;
+	desc->ucode_blob_size = gsb->ls_blob->size;
+
+	desc->wpr_offset = 0;
+}
+
+static const struct gm200_secboot_func
+gm20b_secboot_func = {
+	.bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
+	.fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
+	.fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
+};
+
+
+#ifdef CONFIG_ARCH_TEGRA
+#define TEGRA_MC_BASE				0x70019000
+#define MC_SECURITY_CARVEOUT2_CFG0		0xc58
+#define MC_SECURITY_CARVEOUT2_BOM_0		0xc5c
+#define MC_SECURITY_CARVEOUT2_BOM_HI_0		0xc60
+#define MC_SECURITY_CARVEOUT2_SIZE_128K		0xc64
+#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED	(1 << 1)
+/**
+ * gm20b_tegra_read_wpr() - read the WPR registers on Tegra
+ *
+ * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
+ * is reserved from system memory by the bootloader and irreversibly locked.
+ * This function reads the address and size of the pre-configured WPR region.
+ */
+static int
+gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
+{
+	struct nvkm_secboot *sb = &gsb->base;
+	void __iomem *mc;
+	u32 cfg;
+
+	mc = ioremap(TEGRA_MC_BASE, 0xd00);
+	if (!mc) {
+		nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
+		return -ENOMEM;
+	}
+	gsb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
+	      ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
+	gsb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
+		<< 17;
+	cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
+	iounmap(mc);
+
+	/* Check that WPR settings are valid */
+	if (gsb->wpr_size == 0) {
+		nvkm_error(&sb->subdev, "WPR region is empty\n");
+		return -EINVAL;
+	}
+
+	if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) {
+		nvkm_error(&sb->subdev, "WPR region not locked\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
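+
+/*
+ * Example: a MC_SECURITY_CARVEOUT2_SIZE_128K value of 2 yields a WPR size
+ * of 2 << 17 = 256KiB, and the BOM/BOM_HI pair combines into the 64-bit
+ * base address of the carveout.
+ */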
+#else
+static int
+gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
+{
+	nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
+	return -EINVAL;
+}
+#endif
+
+static int
+gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb)
+{
+	struct gm200_secboot *gsb = gm200_secboot(sb);
+	int acr_size;
+	int ret;
+
+	ret = gm20x_secboot_prepare_blobs(gsb);
+	if (ret)
+		return ret;
+
+	acr_size = gsb->acr_load_blob->size;
+	/*
+	 * On Tegra the WPR region is set by the bootloader. It is illegal for
+	 * the HS blob to be larger than this region.
+	 */
+	if (acr_size > gsb->wpr_size) {
+		nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n");
+		nvkm_error(&sb->subdev, "required: %dB\n", acr_size);
+		nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size);
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
+static int
+gm20b_secboot_init(struct nvkm_secboot *sb)
+{
+	struct gm200_secboot *gsb = gm200_secboot(sb);
+	int ret;
+
+	ret = gm20b_tegra_read_wpr(gsb);
+	if (ret)
+		return ret;
+
+	return gm200_secboot_init(sb);
+}
+
+static const struct nvkm_secboot_func
+gm20b_secboot = {
+	.dtor = gm200_secboot_dtor,
+	.init = gm20b_secboot_init,
+	.prepare_blobs = gm20b_secboot_prepare_blobs,
+	.reset = gm200_secboot_reset,
+	.start = gm200_secboot_start,
+	.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
+	.boot_falcon = NVKM_SECBOOT_FALCON_PMU,
+};
+
+int
+gm20b_secboot_new(struct nvkm_device *device, int index,
+		  struct nvkm_secboot **psb)
+{
+	int ret;
+	struct gm200_secboot *gsb;
+
+	gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
+	if (!gsb) {
+		*psb = NULL;
+		return -ENOMEM;
+	}
+	*psb = &gsb->base;
+
+	ret = nvkm_secboot_ctor(&gm20b_secboot, device, index, &gsb->base);
+	if (ret)
+		return ret;
+
+	gsb->func = &gm20b_secboot_func;
+
+	return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
new file mode 100644
index 0000000..f2b09de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_PRIV_H__
+#define __NVKM_SECBOOT_PRIV_H__
+
+#include <subdev/secboot.h>
+#include <subdev/mmu.h>
+
+struct nvkm_secboot_func {
+	int (*init)(struct nvkm_secboot *);
+	int (*fini)(struct nvkm_secboot *, bool suspend);
+	void *(*dtor)(struct nvkm_secboot *);
+	int (*prepare_blobs)(struct nvkm_secboot *);
+	int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+	int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+
+	/* ID of the falcon that will perform secure boot */
+	enum nvkm_secboot_falcon boot_falcon;
+	/* Bit-mask of IDs of managed falcons */
+	unsigned long managed_falcons;
+};
+
+int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_device *,
+		      int index, struct nvkm_secboot *);
+int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
+int nvkm_secboot_falcon_run(struct nvkm_secboot *);
+
+struct flcn_u64 {
+	u32 lo;
+	u32 hi;
+};
+static inline u64 flcn64_to_u64(const struct flcn_u64 f)
+{
+	return ((u64)f.hi) << 32 | f.lo;
+}
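+
+/*
+ * A helper for the opposite direction could look like the sketch below;
+ * gm200.c currently open-codes this with lower_32_bits()/upper_32_bits():
+ *
+ *	static inline struct flcn_u64 u64_to_flcn64(u64 u)
+ *	{
+ *		struct flcn_u64 ret = {
+ *			.lo = lower_32_bits(u),
+ *			.hi = upper_32_bits(u),
+ *		};
+ *		return ret;
+ *	}
+ */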
+
+/**
+ * struct gm200_flcn_bl_desc - DMEM bootloader descriptor
+ * @signature:		16B signature for secure code. 0s if no secure code
+ * @ctx_dma:		DMA context to be used by BL while loading code/data
+ * @code_dma_base:	256B-aligned Physical FB Address where code is located
+ *			(falcon's $xcbase register)
+ * @non_sec_code_off:	offset from code_dma_base where the non-secure code is
+ *                      located. The offset must be a multiple of 256 to help perf
+ * @non_sec_code_size:	size of the non-secure code part
+ * @sec_code_off:	offset from code_dma_base where the secure code is
+ *                      located. The offset must be a multiple of 256 to help perf
+ * @sec_code_size:	size of the secure code part
+ * @code_entry_point:	code entry point which will be invoked by BL after
+ *                      code is loaded.
+ * @data_dma_base:	256B aligned Physical FB Address where data is located.
+ *			(falcon's $xdbase register)
+ * @data_size:		size of data block. Should be a multiple of 256B
+ *
+ * Structure used by the bootloader to load the rest of the code. This has
+ * to be filled by host and copied into DMEM at offset provided in the
+ * hsflcn_bl_desc.bl_desc_dmem_load_off.
+ */
+struct gm200_flcn_bl_desc {
+	u32 reserved[4];
+	u32 signature[4];
+	u32 ctx_dma;
+	struct flcn_u64 code_dma_base;
+	u32 non_sec_code_off;
+	u32 non_sec_code_size;
+	u32 sec_code_off;
+	u32 sec_code_size;
+	u32 code_entry_point;
+	struct flcn_u64 data_dma_base;
+	u32 data_size;
+};
+
+/**
+ * struct hsflcn_acr_desc - data section of the HS firmware
+ *
+ * This header is to be copied at the beginning of DMEM by the HS bootloader.
+ *
+ * @ucode_reserved_space:	reserved space holding the ACR ucode signatures
+ * @wpr_region_id:	region ID holding the WPR header and its details
+ * @wpr_offset:		offset from the WPR region holding the WPR header
+ * @regions:		region descriptors
+ * @ucode_blob_size:	size of the LS blob
+ * @ucode_blob_base:	FB address where the LS blob is located
+ */
+struct hsflcn_acr_desc {
+	union {
+		u8 reserved_dmem[0x200];
+		u32 signatures[4];
+	} ucode_reserved_space;
+	u32 wpr_region_id;
+	u32 wpr_offset;
+	u32 mmu_mem_range;
+#define FLCN_ACR_MAX_REGIONS 2
+	struct {
+		u32 no_regions;
+		struct {
+			u32 start_addr;
+			u32 end_addr;
+			u32 region_id;
+			u32 read_mask;
+			u32 write_mask;
+			u32 client_mask;
+		} region_props[FLCN_ACR_MAX_REGIONS];
+	} regions;
+	u32 ucode_blob_size;
+	u64 ucode_blob_base __aligned(8);
+	struct {
+		u32 vpr_enabled;
+		u32 vpr_start;
+		u32 vpr_end;
+		u32 hdcp_policies;
+	} vpr_desc;
+};
+
+/**
+ * struct gm200_secboot - whole secure boot state, allowing secure boot to be
+ * performed as needed
+ * @wpr_addr:		physical address of the WPR region
+ * @wpr_size:		size in bytes of the WPR region
+ * @ls_blob:		LS blob of all the LS firmwares, signatures, bootloaders
+ * @acr_load_blob:	HS blob that loads the LS firmwares into the WPR region
+ * @acr_load_bl_desc:	bootloader descriptor of the load blob
+ * @acr_unload_blob:	HS blob that unlocks the WPR region (dGPU only)
+ * @acr_unload_bl_desc:	bootloader descriptor of the unload blob
+ * @hsbl_blob:		HS bootloader used to run the HS blobs
+ * @inst:		instance block for HS falcon
+ * @pgd:		page directory for the HS falcon
+ * @vm:			address space used by the HS falcon
+ * @func:		chip-specific hooks (BL descriptor size and fixup
+ *			callbacks); see struct gm200_secboot_func
+ */
+struct gm200_secboot {
+	struct nvkm_secboot base;
+	const struct gm200_secboot_func *func;
+
+	/*
+	 * Address and size of the WPR region. On dGPU this will be the
+	 * address of the LS blob. On Tegra this is a fixed region set by the
+	 * bootloader
+	 */
+	u64 wpr_addr;
+	u32 wpr_size;
+
+	/*
+	 * HS FW - lock WPR region (dGPU only) and load LS FWs
+	 * on Tegra the HS FW copies the LS blob into the fixed WPR instead
+	 */
+	struct nvkm_gpuobj *acr_load_blob;
+	struct gm200_flcn_bl_desc acr_load_bl_desc;
+
+	/* HS FW - unlock WPR region (dGPU only) */
+	struct nvkm_gpuobj *acr_unload_blob;
+	struct gm200_flcn_bl_desc acr_unload_bl_desc;
+
+	/* HS bootloader */
+	void *hsbl_blob;
+
+	/* LS FWs, to be loaded by the HS ACR */
+	struct nvkm_gpuobj *ls_blob;
+
+	/* Instance block & address space used for HS FW execution */
+	struct nvkm_gpuobj *inst;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+
+	/* To keep track of the state of all managed falcons */
+	enum {
+		/* In non-secure state, no firmware loaded, no privileges */
+		NON_SECURE = 0,
+		/* In low-secure mode and ready to be started */
+		RESET,
+		/* In low-secure mode and running */
+		RUNNING,
+	} falcon_state[NVKM_SECBOOT_FALCON_END];
+
+};
+#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
+
+struct gm200_secboot_func {
+	/*
+	 * Size of the bootloader descriptor for this chip. A block of this
+	 * size is allocated before booting a falcon and the fixup_bl_desc
+	 * callback is called on it
+	 */
+	u32 bl_desc_size;
+	void (*fixup_bl_desc)(const struct gm200_flcn_bl_desc *, void *);
+
+	/*
+	 * Chip-specific modifications of the HS descriptor can be done here.
+	 * On dGPU this is used to fill the information about the WPR region
+	 * we want the HS FW to set up.
+	 */
+	void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
+};
+
+int gm200_secboot_init(struct nvkm_secboot *);
+void *gm200_secboot_dtor(struct nvkm_secboot *);
+int gm200_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+int gm200_secboot_start(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+
+int gm20x_secboot_prepare_blobs(struct gm200_secboot *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index b035c6e..c340762 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -3,3 +3,4 @@
 nvkm-y += nvkm/subdev/volt/nv40.o
 nvkm-y += nvkm/subdev/volt/gk104.o
 nvkm-y += nvkm/subdev/volt/gk20a.o
+nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index fd56c64..d554455 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -24,21 +24,9 @@
 
 #include <core/tegra.h>
 
-struct cvb_coef {
-	int c0;
-	int c1;
-	int c2;
-	int c3;
-	int c4;
-	int c5;
-};
+#include "gk20a.h"
 
-struct gk20a_volt {
-	struct nvkm_volt base;
-	struct regulator *vdd;
-};
-
-const struct cvb_coef gk20a_cvb_coef[] = {
+static const struct cvb_coef gk20a_cvb_coef[] = {
 	/* MHz,        c0,     c1,   c2,    c3,     c4,   c5 */
 	/*  72 */ { 1209886, -36468,  515,   417, -13123,  203},
 	/* 108 */ { 1130804, -27659,  296,   298, -10834,  221},
@@ -89,7 +77,7 @@
 	return mv;
 }
 
-static int
+int
 gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
 {
 	int mv;
@@ -100,7 +88,7 @@
 	return mv * 1000;
 }
 
-static int
+int
 gk20a_volt_vid_get(struct nvkm_volt *base)
 {
 	struct gk20a_volt *volt = gk20a_volt(base);
@@ -115,7 +103,7 @@
 	return -EINVAL;
 }
 
-static int
+int
 gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
 {
 	struct gk20a_volt *volt = gk20a_volt(base);
@@ -125,7 +113,7 @@
 	return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
 }
 
-static int
+int
 gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
 {
 	struct gk20a_volt *volt = gk20a_volt(base);
@@ -155,30 +143,25 @@
 };
 
 int
-gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+_gk20a_volt_ctor(struct nvkm_device *device, int index,
+		 const struct cvb_coef *coefs, int nb_coefs,
+		 struct gk20a_volt *volt)
 {
 	struct nvkm_device_tegra *tdev = device->func->tegra(device);
-	struct gk20a_volt *volt;
 	int i, uv;
 
-	if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
-		return -ENOMEM;
-
 	nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
-	*pvolt = &volt->base;
 
 	uv = regulator_get_voltage(tdev->vdd);
-	nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv);
+	nvkm_debug(&volt->base.subdev, "the default voltage is %duV\n", uv);
 
 	volt->vdd = tdev->vdd;
 
-	volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
-	nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__,
-		   volt->base.vid_nr);
+	volt->base.vid_nr = nb_coefs;
 	for (i = 0; i < volt->base.vid_nr; i++) {
 		volt->base.vid[i].vid = i;
 		volt->base.vid[i].uv =
-			gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+			gk20a_volt_calc_voltage(&coefs[i],
 						tdev->gpu_speedo);
 		nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
 			   volt->base.vid[i].vid, volt->base.vid[i].uv);
@@ -186,3 +169,17 @@
 
 	return 0;
 }
+
+int
+gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+	struct gk20a_volt *volt;
+
+	volt = kzalloc(sizeof(*volt), GFP_KERNEL);
+	if (!volt)
+		return -ENOMEM;
+	*pvolt = &volt->base;
+
+	return _gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+				ARRAY_SIZE(gk20a_cvb_coef), volt);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
new file mode 100644
index 0000000..0fa3b50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __GK20A_VOLT_H__
+#define __GK20A_VOLT_H__
+
+struct cvb_coef {
+	int c0;
+	int c1;
+	int c2;
+	int c3;
+	int c4;
+	int c5;
+};
+
+struct gk20a_volt {
+	struct nvkm_volt base;
+	struct regulator *vdd;
+};
+
+int _gk20a_volt_ctor(struct nvkm_device *device, int index,
+		     const struct cvb_coef *coefs, int nb_coefs,
+		     struct gk20a_volt *volt);
+
+int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo);
+int gk20a_volt_vid_get(struct nvkm_volt *volt);
+int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid);
+int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
new file mode 100644
index 0000000..49b5ecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+#include "gk20a.h"
+
+#include <core/tegra.h>
+
+const struct cvb_coef gm20b_cvb_coef[] = {
+	/* KHz,             c0,      c1,   c2 */
+	/*  76800 */ { 1786666,  -85625, 1632 },
+	/* 153600 */ { 1846729,  -87525, 1632 },
+	/* 230400 */ { 1910480,  -89425, 1632 },
+	/* 307200 */ { 1977920,  -91325, 1632 },
+	/* 384000 */ { 2049049,  -93215, 1632 },
+	/* 460800 */ { 2122872,  -95095, 1632 },
+	/* 537600 */ { 2201331,  -96985, 1632 },
+	/* 614400 */ { 2283479,  -98885, 1632 },
+	/* 691200 */ { 2369315, -100785, 1632 },
+	/* 768000 */ { 2458841, -102685, 1632 },
+	/* 844800 */ { 2550821, -104555, 1632 },
+	/* 921600 */ { 2647676, -106455, 1632 },
+};
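+
+/*
+ * These coefficients are consumed by gk20a_volt_calc_voltage(), which
+ * evaluates (roughly) c0 + c1 * speedo + c2 * speedo^2 in fixed point to
+ * derive the millivolt level for each frequency step; the higher-order
+ * c3..c5 terms of struct cvb_coef are left at zero here.
+ */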
+
+int
+gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+	struct gk20a_volt *volt;
+
+	volt = kzalloc(sizeof(*volt), GFP_KERNEL);
+	if (!volt)
+		return -ENOMEM;
+	*pvolt = &volt->base;
+
+	return _gk20a_volt_ctor(device, index, gm20b_cvb_coef,
+				ARRAY_SIZE(gm20b_cvb_coef), volt);
+}
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 336ad4d..73241c4 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -2,7 +2,6 @@
 	tristate "OMAP DRM"
 	depends on DRM
 	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
-	select OMAP2_DSS
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index fe4c222..48b7b75 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -6,7 +6,7 @@
 obj-y += dss/
 obj-y += displays/
 
-ccflags-y := -Iinclude/drm -Werror
+ccflags-y := -Iinclude/drm
 omapdrm-y := omap_drv.o \
 	omap_irq.o \
 	omap_debugfs.o \
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index d811e6d..747f26a 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -236,46 +236,6 @@
 	.detect		= dvic_detect,
 };
 
-static int dvic_probe_pdata(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct connector_dvi_platform_data *pdata;
-	struct omap_dss_device *in, *dssdev;
-	int i2c_bus_num;
-
-	pdata = dev_get_platdata(&pdev->dev);
-	i2c_bus_num = pdata->i2c_bus_num;
-
-	if (i2c_bus_num != -1) {
-		struct i2c_adapter *adapter;
-
-		adapter = i2c_get_adapter(i2c_bus_num);
-		if (!adapter) {
-			dev_err(&pdev->dev,
-					"Failed to get I2C adapter, bus %d\n",
-					i2c_bus_num);
-			return -EPROBE_DEFER;
-		}
-
-		ddata->i2c_adapter = adapter;
-	}
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		i2c_put_adapter(ddata->i2c_adapter);
-
-		dev_err(&pdev->dev, "Failed to find video source\n");
-		return -EPROBE_DEFER;
-	}
-
-	ddata->in = in;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int dvic_probe_of(struct platform_device *pdev)
 {
 	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -319,17 +279,12 @@
 
 	platform_set_drvdata(pdev, ddata);
 
-	if (dev_get_platdata(&pdev->dev)) {
-		r = dvic_probe_pdata(pdev);
-		if (r)
-			return r;
-	} else if (pdev->dev.of_node) {
-		r = dvic_probe_of(pdev);
-		if (r)
-			return r;
-	} else {
+	if (!pdev->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = dvic_probe_of(pdev);
+	if (r)
+		return r;
 
 	ddata->timings = dvic_default_timings;
 
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 6ee4129..225fd8d 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -206,30 +206,6 @@
 	.set_hdmi_infoframe	= hdmic_set_infoframe,
 };
 
-static int hdmic_probe_pdata(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct connector_hdmi_platform_data *pdata;
-	struct omap_dss_device *in, *dssdev;
-
-	pdata = dev_get_platdata(&pdev->dev);
-
-	ddata->hpd_gpio = -ENODEV;
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&pdev->dev, "Failed to find video source\n");
-		return -EPROBE_DEFER;
-	}
-
-	ddata->in = in;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int hdmic_probe_of(struct platform_device *pdev)
 {
 	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -268,17 +244,12 @@
 	platform_set_drvdata(pdev, ddata);
 	ddata->dev = &pdev->dev;
 
-	if (dev_get_platdata(&pdev->dev)) {
-		r = hdmic_probe_pdata(pdev);
-		if (r)
-			return r;
-	} else if (pdev->dev.of_node) {
-		r = hdmic_probe_of(pdev);
-		if (r)
-			return r;
-	} else {
+	if (!pdev->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = hdmic_probe_of(pdev);
+	if (r)
+		return r;
 
 	if (gpio_is_valid(ddata->hpd_gpio)) {
 		r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index d9048b3..2fd5602 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -166,32 +166,6 @@
 	.get_timings	= tfp410_get_timings,
 };
 
-static int tfp410_probe_pdata(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct encoder_tfp410_platform_data *pdata;
-	struct omap_dss_device *dssdev, *in;
-
-	pdata = dev_get_platdata(&pdev->dev);
-
-	ddata->pd_gpio = pdata->power_down_gpio;
-
-	ddata->data_lines = pdata->data_lines;
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&pdev->dev, "Failed to find video source\n");
-		return -ENODEV;
-	}
-
-	ddata->in = in;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int tfp410_probe_of(struct platform_device *pdev)
 {
 	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -231,17 +205,12 @@
 
 	platform_set_drvdata(pdev, ddata);
 
-	if (dev_get_platdata(&pdev->dev)) {
-		r = tfp410_probe_pdata(pdev);
-		if (r)
-			return r;
-	} else if (pdev->dev.of_node) {
-		r = tfp410_probe_of(pdev);
-		if (r)
-			return r;
-	} else {
+	if (!pdev->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = tfp410_probe_of(pdev);
+	if (r)
+		return r;
 
 	if (gpio_is_valid(ddata->pd_gpio)) {
 		r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 990af6b..916a899 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -13,9 +13,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/gpio.h>
 #include <linux/platform_device.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 
 #include <video/omapdss.h>
 #include <video/omap-panel-data.h>
@@ -24,9 +23,9 @@
 	struct omap_dss_device dssdev;
 	struct omap_dss_device *in;
 
-	int ct_cp_hpd_gpio;
-	int ls_oe_gpio;
-	int hpd_gpio;
+	struct gpio_desc *ct_cp_hpd_gpio;
+	struct gpio_desc *ls_oe_gpio;
+	struct gpio_desc *hpd_gpio;
 
 	struct omap_video_timings timings;
 };
@@ -47,7 +46,7 @@
 	dst->src = dssdev;
 	dssdev->dst = dst;
 
-	gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
+	gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
 	/* DC-DC converter needs at max 300us to get to 90% of 5V */
 	udelay(300);
 
@@ -65,7 +64,7 @@
 	if (dst != dssdev->dst)
 		return;
 
-	gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
+	gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
 
 	dst->src = NULL;
 	dssdev->dst = NULL;
@@ -145,16 +144,14 @@
 	struct omap_dss_device *in = ddata->in;
 	int r;
 
-	if (!gpio_get_value_cansleep(ddata->hpd_gpio))
+	if (!gpiod_get_value_cansleep(ddata->hpd_gpio))
 		return -ENODEV;
 
-	if (gpio_is_valid(ddata->ls_oe_gpio))
-		gpio_set_value_cansleep(ddata->ls_oe_gpio, 1);
+	gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
 
 	r = in->ops.hdmi->read_edid(in, edid, len);
 
-	if (gpio_is_valid(ddata->ls_oe_gpio))
-		gpio_set_value_cansleep(ddata->ls_oe_gpio, 0);
+	gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
 
 	return r;
 }
@@ -163,7 +160,7 @@
 {
 	struct panel_drv_data *ddata = to_panel_data(dssdev);
 
-	return gpio_get_value_cansleep(ddata->hpd_gpio);
+	return gpiod_get_value_cansleep(ddata->hpd_gpio);
 }
 
 static int tpd_set_infoframe(struct omap_dss_device *dssdev,
@@ -201,63 +198,11 @@
 	.set_hdmi_mode		= tpd_set_hdmi_mode,
 };
 
-static int tpd_probe_pdata(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct encoder_tpd12s015_platform_data *pdata;
-	struct omap_dss_device *dssdev, *in;
-
-	pdata = dev_get_platdata(&pdev->dev);
-
-	ddata->ct_cp_hpd_gpio = pdata->ct_cp_hpd_gpio;
-	ddata->ls_oe_gpio = pdata->ls_oe_gpio;
-	ddata->hpd_gpio = pdata->hpd_gpio;
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&pdev->dev, "Failed to find video source\n");
-		return -ENODEV;
-	}
-
-	ddata->in = in;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int tpd_probe_of(struct platform_device *pdev)
 {
 	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
 	struct device_node *node = pdev->dev.of_node;
 	struct omap_dss_device *in;
-	int gpio;
-
-	/* CT CP HPD GPIO */
-	gpio = of_get_gpio(node, 0);
-	if (!gpio_is_valid(gpio)) {
-		dev_err(&pdev->dev, "failed to parse CT CP HPD gpio\n");
-		return gpio;
-	}
-	ddata->ct_cp_hpd_gpio = gpio;
-
-	/* LS OE GPIO */
-	gpio = of_get_gpio(node, 1);
-	if (gpio_is_valid(gpio) || gpio == -ENOENT) {
-		ddata->ls_oe_gpio = gpio;
-	} else {
-		dev_err(&pdev->dev, "failed to parse LS OE gpio\n");
-		return gpio;
-	}
-
-	/* HPD GPIO */
-	gpio = of_get_gpio(node, 2);
-	if (!gpio_is_valid(gpio)) {
-		dev_err(&pdev->dev, "failed to parse HPD gpio\n");
-		return gpio;
-	}
-	ddata->hpd_gpio = gpio;
 
 	in = omapdss_of_find_source_for_first_ep(node);
 	if (IS_ERR(in)) {
@@ -275,6 +220,7 @@
 	struct omap_dss_device *in, *dssdev;
 	struct panel_drv_data *ddata;
 	int r;
+	struct gpio_desc *gpio;
 
 	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
 	if (!ddata)
@@ -282,35 +228,35 @@
 
 	platform_set_drvdata(pdev, ddata);
 
-	if (dev_get_platdata(&pdev->dev)) {
-		r = tpd_probe_pdata(pdev);
-		if (r)
-			return r;
-	} else if (pdev->dev.of_node) {
-		r = tpd_probe_of(pdev);
-		if (r)
-			return r;
-	} else {
+	if (!pdev->dev.of_node)
 		return -ENODEV;
-	}
 
-	r = devm_gpio_request_one(&pdev->dev, ddata->ct_cp_hpd_gpio,
-			GPIOF_OUT_INIT_LOW, "hdmi_ct_cp_hpd");
+	r = tpd_probe_of(pdev);
 	if (r)
+		return r;
+
+
+	gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
+		 GPIOD_OUT_LOW);
+	if (IS_ERR(gpio))
 		goto err_gpio;
 
-	if (gpio_is_valid(ddata->ls_oe_gpio)) {
-		r = devm_gpio_request_one(&pdev->dev, ddata->ls_oe_gpio,
-				GPIOF_OUT_INIT_LOW, "hdmi_ls_oe");
-		if (r)
-			goto err_gpio;
-	}
+	ddata->ct_cp_hpd_gpio = gpio;
 
-	r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
-			GPIOF_DIR_IN, "hdmi_hpd");
-	if (r)
+	gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
+		 GPIOD_OUT_LOW);
+	if (IS_ERR(gpio))
 		goto err_gpio;
 
+	ddata->ls_oe_gpio = gpio;
+
+	gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
+		GPIOD_IN);
+	if (IS_ERR(gpio))
+		goto err_gpio;
+
+	ddata->hpd_gpio = gpio;
+
 	dssdev = &ddata->dssdev;
 	dssdev->ops.hdmi = &tpd_hdmi_ops;
 	dssdev->dev = &pdev->dev;
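
The tpd12s015 change above goes further than the others: besides becoming DT-only it moves from the integer GPIO interface (<linux/gpio.h>, of_get_gpio(), gpio_set_value_cansleep()) to GPIO descriptors (<linux/gpio/consumer.h>). Request and direction setup collapse into a single devm_gpiod_get_index*() call, and the _optional variant returns NULL rather than an error when the line is absent, which is why the gpio_is_valid() guards around the ls_oe GPIO disappear: gpiod calls on a NULL descriptor are safe no-ops. A condensed sketch of the pattern (hypothetical "bar" names):

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/platform_device.h>

	struct bar_gpios {
		struct gpio_desc *enable;	/* required, index 0 */
		struct gpio_desc *ls_oe;	/* optional, index 1 */
	};

	static int bar_request_gpios(struct platform_device *pdev,
			struct bar_gpios *g)
	{
		/* Requests the line and drives it low in one call. */
		g->enable = devm_gpiod_get_index(&pdev->dev, NULL, 0,
				GPIOD_OUT_LOW);
		if (IS_ERR(g->enable))
			return PTR_ERR(g->enable);

		/* NULL (line absent) is not an error for _optional. */
		g->ls_oe = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
				GPIOD_OUT_LOW);
		if (IS_ERR(g->ls_oe))
			return PTR_ERR(g->ls_oe);

		/* Safe even when ls_oe is NULL: no validity check needed. */
		gpiod_set_value_cansleep(g->ls_oe, 1);
		return 0;
	}
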
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 3414c26..36485c2 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -1127,40 +1127,6 @@
 	.memory_read	= dsicm_memory_read,
 };
 
-static int dsicm_probe_pdata(struct platform_device *pdev)
-{
-	const struct panel_dsicm_platform_data *pdata;
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct omap_dss_device *dssdev, *in;
-
-	pdata = dev_get_platdata(&pdev->dev);
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&pdev->dev, "failed to find video source\n");
-		return -EPROBE_DEFER;
-	}
-	ddata->in = in;
-
-	ddata->reset_gpio = pdata->reset_gpio;
-
-	if (pdata->use_ext_te)
-		ddata->ext_te_gpio = pdata->ext_te_gpio;
-	else
-		ddata->ext_te_gpio = -1;
-
-	ddata->ulps_timeout = pdata->ulps_timeout;
-
-	ddata->use_dsi_backlight = pdata->use_dsi_backlight;
-
-	ddata->pin_config = pdata->pin_config;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int dsicm_probe_of(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
@@ -1214,17 +1180,12 @@
 	platform_set_drvdata(pdev, ddata);
 	ddata->pdev = pdev;
 
-	if (dev_get_platdata(dev)) {
-		r = dsicm_probe_pdata(pdev);
-		if (r)
-			return r;
-	} else if (pdev->dev.of_node) {
-		r = dsicm_probe_of(pdev);
-		if (r)
-			return r;
-	} else {
+	if (!pdev->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = dsicm_probe_of(pdev);
+	if (r)
+		return r;
 
 	ddata->timings.x_res = 864;
 	ddata->timings.y_res = 480;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 18eb60e..458f77b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -240,44 +240,6 @@
 	.get_resolution	= omapdss_default_get_resolution,
 };
 
-static int lb035q02_probe_pdata(struct spi_device *spi)
-{
-	const struct panel_lb035q02_platform_data *pdata;
-	struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-	struct omap_dss_device *dssdev, *in;
-	int r;
-
-	pdata = dev_get_platdata(&spi->dev);
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&spi->dev, "failed to find video source '%s'\n",
-				pdata->source);
-		return -EPROBE_DEFER;
-	}
-
-	ddata->in = in;
-
-	ddata->data_lines = pdata->data_lines;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio,
-					GPIOF_OUT_INIT_LOW, "panel enable");
-	if (r)
-		goto err_gpio;
-
-	ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
-
-	ddata->backlight_gpio = pdata->backlight_gpio;
-
-	return 0;
-err_gpio:
-	omap_dss_put_device(ddata->in);
-	return r;
-}
-
 static int lb035q02_probe_of(struct spi_device *spi)
 {
 	struct device_node *node = spi->dev.of_node;
@@ -320,17 +282,12 @@
 
 	ddata->spi = spi;
 
-	if (dev_get_platdata(&spi->dev)) {
-		r = lb035q02_probe_pdata(spi);
-		if (r)
-			return r;
-	} else if (spi->dev.of_node) {
-		r = lb035q02_probe_of(spi);
-		if (r)
-			return r;
-	} else {
+	if (!spi->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = lb035q02_probe_of(spi);
+	if (r)
+		return r;
 
 	if (gpio_is_valid(ddata->backlight_gpio)) {
 		r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 8a928c9..780cb26 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -19,7 +19,6 @@
 #include <linux/of_gpio.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-data.h>
 
 struct panel_drv_data {
 	struct omap_dss_device	dssdev;
@@ -232,34 +231,6 @@
 	.get_resolution	= omapdss_default_get_resolution,
 };
 
-
-static int nec_8048_probe_pdata(struct spi_device *spi)
-{
-	const struct panel_nec_nl8048hl11_platform_data *pdata;
-	struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-	struct omap_dss_device *dssdev, *in;
-
-	pdata = dev_get_platdata(&spi->dev);
-
-	ddata->qvga_gpio = pdata->qvga_gpio;
-	ddata->res_gpio = pdata->res_gpio;
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&spi->dev, "failed to find video source '%s'\n",
-				pdata->source);
-		return -EPROBE_DEFER;
-	}
-	ddata->in = in;
-
-	ddata->data_lines = pdata->data_lines;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int nec_8048_probe_of(struct spi_device *spi)
 {
 	struct device_node *node = spi->dev.of_node;
@@ -315,17 +286,12 @@
 
 	ddata->spi = spi;
 
-	if (dev_get_platdata(&spi->dev)) {
-		r = nec_8048_probe_pdata(spi);
-		if (r)
-			return r;
-	} else if (spi->dev.of_node) {
-		r = nec_8048_probe_of(spi);
-		if (r)
-			return r;
-	} else {
+	if (!spi->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = nec_8048_probe_of(spi);
+	if (r)
+		return r;
 
 	if (gpio_is_valid(ddata->qvga_gpio)) {
 		r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index abfd1f6..529a017 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -18,7 +18,6 @@
 #include <linux/slab.h>
 #include <linux/regulator/consumer.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-data.h>
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
@@ -197,73 +196,6 @@
 	.get_resolution	= omapdss_default_get_resolution,
 };
 
-static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
-		  char *desc, struct gpio_desc **gpiod)
-{
-	struct gpio_desc *gd;
-	int r;
-
-	*gpiod = NULL;
-
-	r = devm_gpio_request_one(dev, gpio, flags, desc);
-	if (r)
-		return r == -ENOENT ? 0 : r;
-
-	gd = gpio_to_desc(gpio);
-	if (IS_ERR(gd))
-		return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
-
-	*gpiod = gd;
-	return 0;
-}
-
-static int sharp_ls_probe_pdata(struct platform_device *pdev)
-{
-	const struct panel_sharp_ls037v7dw01_platform_data *pdata;
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct omap_dss_device *dssdev, *in;
-	int r;
-
-	pdata = dev_get_platdata(&pdev->dev);
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&pdev->dev, "failed to find video source '%s'\n",
-				pdata->source);
-		return -EPROBE_DEFER;
-	}
-
-	ddata->in = in;
-
-	ddata->data_lines = pdata->data_lines;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW,
-		"lcd MO", &ddata->mo_gpio);
-	if (r)
-		return r;
-	r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH,
-		"lcd LR", &ddata->lr_gpio);
-	if (r)
-		return r;
-	r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH,
-		"lcd UD", &ddata->ud_gpio);
-	if (r)
-		return r;
-	r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW,
-		"lcd RESB", &ddata->resb_gpio);
-	if (r)
-		return r;
-	r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW,
-		"lcd INI", &ddata->ini_gpio);
-	if (r)
-		return r;
-
-	return 0;
-}
-
 static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
 	const char *desc, struct gpio_desc **gpiod)
 {
@@ -340,17 +272,12 @@
 
 	platform_set_drvdata(pdev, ddata);
 
-	if (dev_get_platdata(&pdev->dev)) {
-		r = sharp_ls_probe_pdata(pdev);
-		if (r)
-			return r;
-	} else if (pdev->dev.of_node) {
-		r = sharp_ls_probe_of(pdev);
-		if (r)
-			return r;
-	} else {
+	if (!pdev->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = sharp_ls_probe_of(pdev);
+	if (r)
+		return r;
 
 	ddata->videomode = sharp_ls_timings;
 
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index 4d657f3..bd8d850 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -29,7 +29,6 @@
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
 #include <video/omapdss.h>
-#include <video/omap-panel-data.h>
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
@@ -365,31 +364,6 @@
 	.check_timings	= td028ttec1_panel_check_timings,
 };
 
-static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
-{
-	const struct panel_tpo_td028ttec1_platform_data *pdata;
-	struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-	struct omap_dss_device *dssdev, *in;
-
-	pdata = dev_get_platdata(&spi->dev);
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&spi->dev, "failed to find video source '%s'\n",
-				pdata->source);
-		return -EPROBE_DEFER;
-	}
-
-	ddata->in = in;
-
-	ddata->data_lines = pdata->data_lines;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int td028ttec1_probe_of(struct spi_device *spi)
 {
 	struct device_node *node = spi->dev.of_node;
@@ -432,17 +406,12 @@
 
 	ddata->spi_dev = spi;
 
-	if (dev_get_platdata(&spi->dev)) {
-		r = td028ttec1_panel_probe_pdata(spi);
-		if (r)
-			return r;
-	} else if (spi->dev.of_node) {
-		r = td028ttec1_probe_of(spi);
-		if (r)
-			return r;
-	} else {
+	if (!spi->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = td028ttec1_probe_of(spi);
+	if (r)
+		return r;
 
 	ddata->videomode = td028ttec1_panel_timings;
 
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 68e3b68..03e2beb 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -20,7 +20,6 @@
 #include <linux/of_gpio.h>
 
 #include <video/omapdss.h>
-#include <video/omap-panel-data.h>
 
 #define TPO_R02_MODE(x)		((x) & 7)
 #define TPO_R02_MODE_800x480	7
@@ -464,33 +463,6 @@
 	.get_resolution	= omapdss_default_get_resolution,
 };
 
-
-static int tpo_td043_probe_pdata(struct spi_device *spi)
-{
-	const struct panel_tpo_td043mtea1_platform_data *pdata;
-	struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-	struct omap_dss_device *dssdev, *in;
-
-	pdata = dev_get_platdata(&spi->dev);
-
-	ddata->nreset_gpio = pdata->nreset_gpio;
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&spi->dev, "failed to find video source '%s'\n",
-				pdata->source);
-		return -EPROBE_DEFER;
-	}
-	ddata->in = in;
-
-	ddata->data_lines = pdata->data_lines;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int tpo_td043_probe_of(struct spi_device *spi)
 {
 	struct device_node *node = spi->dev.of_node;
@@ -541,17 +513,12 @@
 
 	ddata->spi = spi;
 
-	if (dev_get_platdata(&spi->dev)) {
-		r = tpo_td043_probe_pdata(spi);
-		if (r)
-			return r;
-	} else if (spi->dev.of_node) {
-		r = tpo_td043_probe_of(spi);
-		if (r)
-			return r;
-	} else {
+	if (!spi->dev.of_node)
 		return -ENODEV;
-	}
+
+	r = tpo_td043_probe_of(spi);
+	if (r)
+		return r;
 
 	ddata->mode = TPO_R02_MODE_800x480;
 	memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index b5136d3..b651ec9 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -3,9 +3,6 @@
 # Core DSS files
 omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
 	output.o dss-of.o pll.o video-pll.o
-# DSS compat layer files
-omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
-	dispc-compat.o display-sysfs.o
 omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
 omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
 omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/gpu/drm/omapdrm/dss/apply.c b/drivers/gpu/drm/omapdrm/dss/apply.c
deleted file mode 100644
index 663ccc3..0000000
--- a/drivers/gpu/drm/omapdrm/dss/apply.c
+++ /dev/null
@@ -1,1702 +0,0 @@
-/*
- * Copyright (C) 2011 Texas Instruments
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "APPLY"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/jiffies.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-#include "dispc-compat.h"
-
-/*
- * We have 4 levels of cache for the dispc settings. First two are in SW and
- * the latter two in HW.
- *
- *       set_info()
- *          v
- * +--------------------+
- * |     user_info      |
- * +--------------------+
- *          v
- *        apply()
- *          v
- * +--------------------+
- * |       info         |
- * +--------------------+
- *          v
- *      write_regs()
- *          v
- * +--------------------+
- * |  shadow registers  |
- * +--------------------+
- *          v
- * VFP or lcd/digit_enable
- *          v
- * +--------------------+
- * |      registers     |
- * +--------------------+
- */
-
-struct ovl_priv_data {
-
-	bool user_info_dirty;
-	struct omap_overlay_info user_info;
-
-	bool info_dirty;
-	struct omap_overlay_info info;
-
-	bool shadow_info_dirty;
-
-	bool extra_info_dirty;
-	bool shadow_extra_info_dirty;
-
-	bool enabled;
-	u32 fifo_low, fifo_high;
-
-	/*
-	 * True if overlay is to be enabled. Used to check and calculate configs
-	 * for the overlay before it is enabled in the HW.
-	 */
-	bool enabling;
-};
-
-struct mgr_priv_data {
-
-	bool user_info_dirty;
-	struct omap_overlay_manager_info user_info;
-
-	bool info_dirty;
-	struct omap_overlay_manager_info info;
-
-	bool shadow_info_dirty;
-
-	/* If true, GO bit is up and shadow registers cannot be written.
-	 * Never true for manual update displays */
-	bool busy;
-
-	/* If true, dispc output is enabled */
-	bool updating;
-
-	/* If true, a display is enabled using this manager */
-	bool enabled;
-
-	bool extra_info_dirty;
-	bool shadow_extra_info_dirty;
-
-	struct omap_video_timings timings;
-	struct dss_lcd_mgr_config lcd_config;
-
-	void (*framedone_handler)(void *);
-	void *framedone_handler_data;
-};
-
-static struct {
-	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
-	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];
-
-	bool irq_enabled;
-} dss_data;
-
-/* protects dss_data */
-static spinlock_t data_lock;
-/* lock for blocking functions */
-static DEFINE_MUTEX(apply_lock);
-static DECLARE_COMPLETION(extra_updated_completion);
-
-static void dss_register_vsync_isr(void);
-
-static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
-{
-	return &dss_data.ovl_priv_data_array[ovl->id];
-}
-
-static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
-{
-	return &dss_data.mgr_priv_data_array[mgr->id];
-}
-
-static void apply_init_priv(void)
-{
-	const int num_ovls = dss_feat_get_num_ovls();
-	struct mgr_priv_data *mp;
-	int i;
-
-	spin_lock_init(&data_lock);
-
-	for (i = 0; i < num_ovls; ++i) {
-		struct ovl_priv_data *op;
-
-		op = &dss_data.ovl_priv_data_array[i];
-
-		op->info.color_mode = OMAP_DSS_COLOR_RGB16;
-		op->info.rotation_type = OMAP_DSS_ROT_DMA;
-
-		op->info.global_alpha = 255;
-
-		switch (i) {
-		case 0:
-			op->info.zorder = 0;
-			break;
-		case 1:
-			op->info.zorder =
-				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
-			break;
-		case 2:
-			op->info.zorder =
-				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
-			break;
-		case 3:
-			op->info.zorder =
-				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
-			break;
-		}
-
-		op->user_info = op->info;
-	}
-
-	/*
-	 * Initialize some of the lcd_config fields for TV manager, this lets
-	 * us prevent checking if the manager is LCD or TV at some places
-	 */
-	mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT];
-
-	mp->lcd_config.video_port_width = 24;
-	mp->lcd_config.clock_info.lck_div = 1;
-	mp->lcd_config.clock_info.pck_div = 1;
-}
-
-/*
- * A LCD manager's stallmode decides whether it is in manual or auto update. TV
- * manager is always auto update, stallmode field for TV manager is false by
- * default
- */
-static bool ovl_manual_update(struct omap_overlay *ovl)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(ovl->manager);
-
-	return mp->lcd_config.stallmode;
-}
-
-static bool mgr_manual_update(struct omap_overlay_manager *mgr)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
-	return mp->lcd_config.stallmode;
-}
-
-static int dss_check_settings_low(struct omap_overlay_manager *mgr,
-		bool applying)
-{
-	struct omap_overlay_info *oi;
-	struct omap_overlay_manager_info *mi;
-	struct omap_overlay *ovl;
-	struct omap_overlay_info *ois[MAX_DSS_OVERLAYS];
-	struct ovl_priv_data *op;
-	struct mgr_priv_data *mp;
-
-	mp = get_mgr_priv(mgr);
-
-	if (!mp->enabled)
-		return 0;
-
-	if (applying && mp->user_info_dirty)
-		mi = &mp->user_info;
-	else
-		mi = &mp->info;
-
-	/* collect the infos to be tested into the array */
-	list_for_each_entry(ovl, &mgr->overlays, list) {
-		op = get_ovl_priv(ovl);
-
-		if (!op->enabled && !op->enabling)
-			oi = NULL;
-		else if (applying && op->user_info_dirty)
-			oi = &op->user_info;
-		else
-			oi = &op->info;
-
-		ois[ovl->id] = oi;
-	}
-
-	return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois);
-}
-
-/*
- * check manager and overlay settings using overlay_info from data->info
- */
-static int dss_check_settings(struct omap_overlay_manager *mgr)
-{
-	return dss_check_settings_low(mgr, false);
-}
-
-/*
- * check manager and overlay settings using overlay_info from ovl->info if
- * dirty and from data->info otherwise
- */
-static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
-{
-	return dss_check_settings_low(mgr, true);
-}
-
-static bool need_isr(void)
-{
-	const int num_mgrs = dss_feat_get_num_mgrs();
-	int i;
-
-	for (i = 0; i < num_mgrs; ++i) {
-		struct omap_overlay_manager *mgr;
-		struct mgr_priv_data *mp;
-		struct omap_overlay *ovl;
-
-		mgr = omap_dss_get_overlay_manager(i);
-		mp = get_mgr_priv(mgr);
-
-		if (!mp->enabled)
-			continue;
-
-		if (mgr_manual_update(mgr)) {
-			/* to catch FRAMEDONE */
-			if (mp->updating)
-				return true;
-		} else {
-			/* to catch GO bit going down */
-			if (mp->busy)
-				return true;
-
-			/* to write new values to registers */
-			if (mp->info_dirty)
-				return true;
-
-			/* to set GO bit */
-			if (mp->shadow_info_dirty)
-				return true;
-
-			/*
-			 * NOTE: we don't check extra_info flags for disabled
-			 * managers, once the manager is enabled, the extra_info
-			 * related manager changes will be taken in by HW.
-			 */
-
-			/* to write new values to registers */
-			if (mp->extra_info_dirty)
-				return true;
-
-			/* to set GO bit */
-			if (mp->shadow_extra_info_dirty)
-				return true;
-
-			list_for_each_entry(ovl, &mgr->overlays, list) {
-				struct ovl_priv_data *op;
-
-				op = get_ovl_priv(ovl);
-
-				/*
-				 * NOTE: we check extra_info flags even for
-				 * disabled overlays, as extra_infos need to be
-				 * always written.
-				 */
-
-				/* to write new values to registers */
-				if (op->extra_info_dirty)
-					return true;
-
-				/* to set GO bit */
-				if (op->shadow_extra_info_dirty)
-					return true;
-
-				if (!op->enabled)
-					continue;
-
-				/* to write new values to registers */
-				if (op->info_dirty)
-					return true;
-
-				/* to set GO bit */
-				if (op->shadow_info_dirty)
-					return true;
-			}
-		}
-	}
-
-	return false;
-}
-
-static bool need_go(struct omap_overlay_manager *mgr)
-{
-	struct omap_overlay *ovl;
-	struct mgr_priv_data *mp;
-	struct ovl_priv_data *op;
-
-	mp = get_mgr_priv(mgr);
-
-	if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
-		return true;
-
-	list_for_each_entry(ovl, &mgr->overlays, list) {
-		op = get_ovl_priv(ovl);
-		if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
-			return true;
-	}
-
-	return false;
-}
-
-/* returns true if an extra_info field is currently being updated */
-static bool extra_info_update_ongoing(void)
-{
-	const int num_mgrs = dss_feat_get_num_mgrs();
-	int i;
-
-	for (i = 0; i < num_mgrs; ++i) {
-		struct omap_overlay_manager *mgr;
-		struct omap_overlay *ovl;
-		struct mgr_priv_data *mp;
-
-		mgr = omap_dss_get_overlay_manager(i);
-		mp = get_mgr_priv(mgr);
-
-		if (!mp->enabled)
-			continue;
-
-		if (!mp->updating)
-			continue;
-
-		if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
-			return true;
-
-		list_for_each_entry(ovl, &mgr->overlays, list) {
-			struct ovl_priv_data *op = get_ovl_priv(ovl);
-
-			if (op->extra_info_dirty || op->shadow_extra_info_dirty)
-				return true;
-		}
-	}
-
-	return false;
-}
-
-/* wait until no extra_info updates are pending */
-static void wait_pending_extra_info_updates(void)
-{
-	bool updating;
-	unsigned long flags;
-	unsigned long t;
-	int r;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	updating = extra_info_update_ongoing();
-
-	if (!updating) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		return;
-	}
-
-	init_completion(&extra_updated_completion);
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	t = msecs_to_jiffies(500);
-	r = wait_for_completion_timeout(&extra_updated_completion, t);
-	if (r == 0)
-		DSSWARN("timeout in wait_pending_extra_info_updates\n");
-}
-
-static struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
-{
-	struct omap_dss_device *dssdev;
-
-	dssdev = mgr->output;
-	if (dssdev == NULL)
-		return NULL;
-
-	while (dssdev->dst)
-		dssdev = dssdev->dst;
-
-	if (dssdev->driver)
-		return dssdev;
-	else
-		return NULL;
-}
-
-static struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
-{
-	return ovl->manager ? dss_mgr_get_device(ovl->manager) : NULL;
-}
-
-static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
-{
-	unsigned long timeout = msecs_to_jiffies(500);
-	u32 irq;
-	int r;
-
-	if (mgr->output == NULL)
-		return -ENODEV;
-
-	r = dispc_runtime_get();
-	if (r)
-		return r;
-
-	switch (mgr->output->id) {
-	case OMAP_DSS_OUTPUT_VENC:
-		irq = DISPC_IRQ_EVSYNC_ODD;
-		break;
-	case OMAP_DSS_OUTPUT_HDMI:
-		irq = DISPC_IRQ_EVSYNC_EVEN;
-		break;
-	default:
-		irq = dispc_mgr_get_vsync_irq(mgr->id);
-		break;
-	}
-
-	r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
-
-	dispc_runtime_put();
-
-	return r;
-}
-
-static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
-{
-	unsigned long timeout = msecs_to_jiffies(500);
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-	u32 irq;
-	unsigned long flags;
-	int r;
-	int i;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	if (mgr_manual_update(mgr)) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		return 0;
-	}
-
-	if (!mp->enabled) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		return 0;
-	}
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	r = dispc_runtime_get();
-	if (r)
-		return r;
-
-	irq = dispc_mgr_get_vsync_irq(mgr->id);
-
-	i = 0;
-	while (1) {
-		bool shadow_dirty, dirty;
-
-		spin_lock_irqsave(&data_lock, flags);
-		dirty = mp->info_dirty;
-		shadow_dirty = mp->shadow_info_dirty;
-		spin_unlock_irqrestore(&data_lock, flags);
-
-		if (!dirty && !shadow_dirty) {
-			r = 0;
-			break;
-		}
-
-		/* 4 iterations is the worst case:
-		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
-		 * 2 - first VSYNC, dirty = true
-		 * 3 - dirty = false, shadow_dirty = true
-		 * 4 - shadow_dirty = false */
-		if (i++ == 3) {
-			DSSERR("mgr(%d)->wait_for_go() not finishing\n",
-					mgr->id);
-			r = 0;
-			break;
-		}
-
-		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
-		if (r == -ERESTARTSYS)
-			break;
-
-		if (r) {
-			DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
-			break;
-		}
-	}
-
-	dispc_runtime_put();
-
-	return r;
-}
-
-static int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
-{
-	unsigned long timeout = msecs_to_jiffies(500);
-	struct ovl_priv_data *op;
-	struct mgr_priv_data *mp;
-	u32 irq;
-	unsigned long flags;
-	int r;
-	int i;
-
-	if (!ovl->manager)
-		return 0;
-
-	mp = get_mgr_priv(ovl->manager);
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	if (ovl_manual_update(ovl)) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		return 0;
-	}
-
-	if (!mp->enabled) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		return 0;
-	}
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	r = dispc_runtime_get();
-	if (r)
-		return r;
-
-	irq = dispc_mgr_get_vsync_irq(ovl->manager->id);
-
-	op = get_ovl_priv(ovl);
-	i = 0;
-	while (1) {
-		bool shadow_dirty, dirty;
-
-		spin_lock_irqsave(&data_lock, flags);
-		dirty = op->info_dirty;
-		shadow_dirty = op->shadow_info_dirty;
-		spin_unlock_irqrestore(&data_lock, flags);
-
-		if (!dirty && !shadow_dirty) {
-			r = 0;
-			break;
-		}
-
-		/* 4 iterations is the worst case:
-		 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
-		 * 2 - first VSYNC, dirty = true
-		 * 3 - dirty = false, shadow_dirty = true
-		 * 4 - shadow_dirty = false */
-		if (i++ == 3) {
-			DSSERR("ovl(%d)->wait_for_go() not finishing\n",
-					ovl->id);
-			r = 0;
-			break;
-		}
-
-		r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
-		if (r == -ERESTARTSYS)
-			break;
-
-		if (r) {
-			DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
-			break;
-		}
-	}
-
-	dispc_runtime_put();
-
-	return r;
-}
-
-static void dss_ovl_write_regs(struct omap_overlay *ovl)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	struct omap_overlay_info *oi;
-	bool replication;
-	struct mgr_priv_data *mp;
-	int r;
-
-	DSSDBG("writing ovl %d regs\n", ovl->id);
-
-	if (!op->enabled || !op->info_dirty)
-		return;
-
-	oi = &op->info;
-
-	mp = get_mgr_priv(ovl->manager);
-
-	replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode);
-
-	r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings, false);
-	if (r) {
-		/*
-		 * We can't do much here, as this function can be called from
-		 * vsync interrupt.
-		 */
-		DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);
-
-		/* This will leave fifo configurations in a nonoptimal state */
-		op->enabled = false;
-		dispc_ovl_enable(ovl->id, false);
-		return;
-	}
-
-	op->info_dirty = false;
-	if (mp->updating)
-		op->shadow_info_dirty = true;
-}
-
-static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	struct mgr_priv_data *mp;
-
-	DSSDBG("writing ovl %d regs extra\n", ovl->id);
-
-	if (!op->extra_info_dirty)
-		return;
-
-	/* note: write also when op->enabled == false, so that the ovl gets
-	 * disabled */
-
-	dispc_ovl_enable(ovl->id, op->enabled);
-	dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);
-
-	mp = get_mgr_priv(ovl->manager);
-
-	op->extra_info_dirty = false;
-	if (mp->updating)
-		op->shadow_extra_info_dirty = true;
-}
-
-static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-	struct omap_overlay *ovl;
-
-	DSSDBG("writing mgr %d regs\n", mgr->id);
-
-	if (!mp->enabled)
-		return;
-
-	WARN_ON(mp->busy);
-
-	/* Commit overlay settings */
-	list_for_each_entry(ovl, &mgr->overlays, list) {
-		dss_ovl_write_regs(ovl);
-		dss_ovl_write_regs_extra(ovl);
-	}
-
-	if (mp->info_dirty) {
-		dispc_mgr_setup(mgr->id, &mp->info);
-
-		mp->info_dirty = false;
-		if (mp->updating)
-			mp->shadow_info_dirty = true;
-	}
-}
-
-static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
-	DSSDBG("writing mgr %d regs extra\n", mgr->id);
-
-	if (!mp->extra_info_dirty)
-		return;
-
-	dispc_mgr_set_timings(mgr->id, &mp->timings);
-
-	/* lcd_config parameters */
-	if (dss_mgr_is_lcd(mgr->id))
-		dispc_mgr_set_lcd_config(mgr->id, &mp->lcd_config);
-
-	mp->extra_info_dirty = false;
-	if (mp->updating)
-		mp->shadow_extra_info_dirty = true;
-}
-
-static void dss_write_regs(void)
-{
-	const int num_mgrs = omap_dss_get_num_overlay_managers();
-	int i;
-
-	for (i = 0; i < num_mgrs; ++i) {
-		struct omap_overlay_manager *mgr;
-		struct mgr_priv_data *mp;
-		int r;
-
-		mgr = omap_dss_get_overlay_manager(i);
-		mp = get_mgr_priv(mgr);
-
-		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
-			continue;
-
-		r = dss_check_settings(mgr);
-		if (r) {
-			DSSERR("cannot write registers for manager %s: "
-					"illegal configuration\n", mgr->name);
-			continue;
-		}
-
-		dss_mgr_write_regs(mgr);
-		dss_mgr_write_regs_extra(mgr);
-	}
-}
-
-static void dss_set_go_bits(void)
-{
-	const int num_mgrs = omap_dss_get_num_overlay_managers();
-	int i;
-
-	for (i = 0; i < num_mgrs; ++i) {
-		struct omap_overlay_manager *mgr;
-		struct mgr_priv_data *mp;
-
-		mgr = omap_dss_get_overlay_manager(i);
-		mp = get_mgr_priv(mgr);
-
-		if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
-			continue;
-
-		if (!need_go(mgr))
-			continue;
-
-		mp->busy = true;
-
-		if (!dss_data.irq_enabled && need_isr())
-			dss_register_vsync_isr();
-
-		dispc_mgr_go(mgr->id);
-	}
-
-}
-
-static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
-{
-	struct omap_overlay *ovl;
-	struct mgr_priv_data *mp;
-	struct ovl_priv_data *op;
-
-	mp = get_mgr_priv(mgr);
-	mp->shadow_info_dirty = false;
-	mp->shadow_extra_info_dirty = false;
-
-	list_for_each_entry(ovl, &mgr->overlays, list) {
-		op = get_ovl_priv(ovl);
-		op->shadow_info_dirty = false;
-		op->shadow_extra_info_dirty = false;
-	}
-}
-
-static int dss_mgr_connect_compat(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dst)
-{
-	return mgr->set_output(mgr, dst);
-}
-
-static void dss_mgr_disconnect_compat(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dst)
-{
-	mgr->unset_output(mgr);
-}
-
-static void dss_mgr_start_update_compat(struct omap_overlay_manager *mgr)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-	unsigned long flags;
-	int r;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	WARN_ON(mp->updating);
-
-	r = dss_check_settings(mgr);
-	if (r) {
-		DSSERR("cannot start manual update: illegal configuration\n");
-		spin_unlock_irqrestore(&data_lock, flags);
-		return;
-	}
-
-	dss_mgr_write_regs(mgr);
-	dss_mgr_write_regs_extra(mgr);
-
-	mp->updating = true;
-
-	if (!dss_data.irq_enabled && need_isr())
-		dss_register_vsync_isr();
-
-	dispc_mgr_enable_sync(mgr->id);
-
-	spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static void dss_apply_irq_handler(void *data, u32 mask);
-
-static void dss_register_vsync_isr(void)
-{
-	const int num_mgrs = dss_feat_get_num_mgrs();
-	u32 mask;
-	int r, i;
-
-	mask = 0;
-	for (i = 0; i < num_mgrs; ++i)
-		mask |= dispc_mgr_get_vsync_irq(i);
-
-	for (i = 0; i < num_mgrs; ++i)
-		mask |= dispc_mgr_get_framedone_irq(i);
-
-	r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
-	WARN_ON(r);
-
-	dss_data.irq_enabled = true;
-}
-
-static void dss_unregister_vsync_isr(void)
-{
-	const int num_mgrs = dss_feat_get_num_mgrs();
-	u32 mask;
-	int r, i;
-
-	mask = 0;
-	for (i = 0; i < num_mgrs; ++i)
-		mask |= dispc_mgr_get_vsync_irq(i);
-
-	for (i = 0; i < num_mgrs; ++i)
-		mask |= dispc_mgr_get_framedone_irq(i);
-
-	r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
-	WARN_ON(r);
-
-	dss_data.irq_enabled = false;
-}
-
-static void dss_apply_irq_handler(void *data, u32 mask)
-{
-	const int num_mgrs = dss_feat_get_num_mgrs();
-	int i;
-	bool extra_updating;
-
-	spin_lock(&data_lock);
-
-	/* clear busy, updating flags, shadow_dirty flags */
-	for (i = 0; i < num_mgrs; i++) {
-		struct omap_overlay_manager *mgr;
-		struct mgr_priv_data *mp;
-
-		mgr = omap_dss_get_overlay_manager(i);
-		mp = get_mgr_priv(mgr);
-
-		if (!mp->enabled)
-			continue;
-
-		mp->updating = dispc_mgr_is_enabled(i);
-
-		if (!mgr_manual_update(mgr)) {
-			bool was_busy = mp->busy;
-			mp->busy = dispc_mgr_go_busy(i);
-
-			if (was_busy && !mp->busy)
-				mgr_clear_shadow_dirty(mgr);
-		}
-	}
-
-	dss_write_regs();
-	dss_set_go_bits();
-
-	extra_updating = extra_info_update_ongoing();
-	if (!extra_updating)
-		complete_all(&extra_updated_completion);
-
-	/* call framedone handlers for manual update displays */
-	for (i = 0; i < num_mgrs; i++) {
-		struct omap_overlay_manager *mgr;
-		struct mgr_priv_data *mp;
-
-		mgr = omap_dss_get_overlay_manager(i);
-		mp = get_mgr_priv(mgr);
-
-		if (!mgr_manual_update(mgr) || !mp->framedone_handler)
-			continue;
-
-		if (mask & dispc_mgr_get_framedone_irq(i))
-			mp->framedone_handler(mp->framedone_handler_data);
-	}
-
-	if (!need_isr())
-		dss_unregister_vsync_isr();
-
-	spin_unlock(&data_lock);
-}
-
-static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
-{
-	struct ovl_priv_data *op;
-
-	op = get_ovl_priv(ovl);
-
-	if (!op->user_info_dirty)
-		return;
-
-	op->user_info_dirty = false;
-	op->info_dirty = true;
-	op->info = op->user_info;
-}
-
-static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
-{
-	struct mgr_priv_data *mp;
-
-	mp = get_mgr_priv(mgr);
-
-	if (!mp->user_info_dirty)
-		return;
-
-	mp->user_info_dirty = false;
-	mp->info_dirty = true;
-	mp->info = mp->user_info;
-}
-
-static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
-{
-	unsigned long flags;
-	struct omap_overlay *ovl;
-	int r;
-
-	DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	r = dss_check_settings_apply(mgr);
-	if (r) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		DSSERR("failed to apply settings: illegal configuration.\n");
-		return r;
-	}
-
-	/* Configure overlays */
-	list_for_each_entry(ovl, &mgr->overlays, list)
-		omap_dss_mgr_apply_ovl(ovl);
-
-	/* Configure manager */
-	omap_dss_mgr_apply_mgr(mgr);
-
-	dss_write_regs();
-	dss_set_go_bits();
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	return 0;
-}
-
-static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable)
-{
-	struct ovl_priv_data *op;
-
-	op = get_ovl_priv(ovl);
-
-	if (op->enabled == enable)
-		return;
-
-	op->enabled = enable;
-	op->extra_info_dirty = true;
-}
-
-static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl,
-		u32 fifo_low, u32 fifo_high)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-
-	if (op->fifo_low == fifo_low && op->fifo_high == fifo_high)
-		return;
-
-	op->fifo_low = fifo_low;
-	op->fifo_high = fifo_high;
-	op->extra_info_dirty = true;
-}
-
-static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	u32 fifo_low, fifo_high;
-	bool use_fifo_merge = false;
-
-	if (!op->enabled && !op->enabling)
-		return;
-
-	dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
-			use_fifo_merge, ovl_manual_update(ovl));
-
-	dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
-}
-
-static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
-{
-	struct omap_overlay *ovl;
-	struct mgr_priv_data *mp;
-
-	mp = get_mgr_priv(mgr);
-
-	if (!mp->enabled)
-		return;
-
-	list_for_each_entry(ovl, &mgr->overlays, list)
-		dss_ovl_setup_fifo(ovl);
-}
-
-static void dss_setup_fifos(void)
-{
-	const int num_mgrs = omap_dss_get_num_overlay_managers();
-	struct omap_overlay_manager *mgr;
-	int i;
-
-	for (i = 0; i < num_mgrs; ++i) {
-		mgr = omap_dss_get_overlay_manager(i);
-		dss_mgr_setup_fifos(mgr);
-	}
-}
-
-static int dss_mgr_enable_compat(struct omap_overlay_manager *mgr)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-	unsigned long flags;
-	int r;
-
-	mutex_lock(&apply_lock);
-
-	if (mp->enabled)
-		goto out;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	mp->enabled = true;
-
-	r = dss_check_settings(mgr);
-	if (r) {
-		DSSERR("failed to enable manager %d: check_settings failed\n",
-				mgr->id);
-		goto err;
-	}
-
-	dss_setup_fifos();
-
-	dss_write_regs();
-	dss_set_go_bits();
-
-	if (!mgr_manual_update(mgr))
-		mp->updating = true;
-
-	if (!dss_data.irq_enabled && need_isr())
-		dss_register_vsync_isr();
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	if (!mgr_manual_update(mgr))
-		dispc_mgr_enable_sync(mgr->id);
-
-out:
-	mutex_unlock(&apply_lock);
-
-	return 0;
-
-err:
-	mp->enabled = false;
-	spin_unlock_irqrestore(&data_lock, flags);
-	mutex_unlock(&apply_lock);
-	return r;
-}
-
-static void dss_mgr_disable_compat(struct omap_overlay_manager *mgr)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-	unsigned long flags;
-
-	mutex_lock(&apply_lock);
-
-	if (!mp->enabled)
-		goto out;
-
-	wait_pending_extra_info_updates();
-
-	if (!mgr_manual_update(mgr))
-		dispc_mgr_disable_sync(mgr->id);
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	mp->updating = false;
-	mp->enabled = false;
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-out:
-	mutex_unlock(&apply_lock);
-}
-
-static int dss_mgr_set_info(struct omap_overlay_manager *mgr,
-		struct omap_overlay_manager_info *info)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-	unsigned long flags;
-	int r;
-
-	r = dss_mgr_simple_check(mgr, info);
-	if (r)
-		return r;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	mp->user_info = *info;
-	mp->user_info_dirty = true;
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	return 0;
-}
-
-static void dss_mgr_get_info(struct omap_overlay_manager *mgr,
-		struct omap_overlay_manager_info *info)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-	unsigned long flags;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	*info = mp->user_info;
-
-	spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static int dss_mgr_set_output(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *output)
-{
-	int r;
-
-	mutex_lock(&apply_lock);
-
-	if (mgr->output) {
-		DSSERR("manager %s is already connected to an output\n",
-			mgr->name);
-		r = -EINVAL;
-		goto err;
-	}
-
-	if ((mgr->supported_outputs & output->id) == 0) {
-		DSSERR("output does not support manager %s\n",
-			mgr->name);
-		r = -EINVAL;
-		goto err;
-	}
-
-	output->manager = mgr;
-	mgr->output = output;
-
-	mutex_unlock(&apply_lock);
-
-	return 0;
-err:
-	mutex_unlock(&apply_lock);
-	return r;
-}
-
-static int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
-{
-	int r;
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-	unsigned long flags;
-
-	mutex_lock(&apply_lock);
-
-	if (!mgr->output) {
-		DSSERR("failed to unset output, output not set\n");
-		r = -EINVAL;
-		goto err;
-	}
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	if (mp->enabled) {
-		DSSERR("output can't be unset when manager is enabled\n");
-		r = -EINVAL;
-		goto err1;
-	}
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	mgr->output->manager = NULL;
-	mgr->output = NULL;
-
-	mutex_unlock(&apply_lock);
-
-	return 0;
-err1:
-	spin_unlock_irqrestore(&data_lock, flags);
-err:
-	mutex_unlock(&apply_lock);
-
-	return r;
-}
-
-static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
-		const struct omap_video_timings *timings)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
-	mp->timings = *timings;
-	mp->extra_info_dirty = true;
-}
-
-static void dss_mgr_set_timings_compat(struct omap_overlay_manager *mgr,
-		const struct omap_video_timings *timings)
-{
-	unsigned long flags;
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	if (mp->updating) {
-		DSSERR("cannot set timings for %s: manager needs to be disabled\n",
-			mgr->name);
-		goto out;
-	}
-
-	dss_apply_mgr_timings(mgr, timings);
-out:
-	spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
-		const struct dss_lcd_mgr_config *config)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
-	mp->lcd_config = *config;
-	mp->extra_info_dirty = true;
-}
-
-static void dss_mgr_set_lcd_config_compat(struct omap_overlay_manager *mgr,
-		const struct dss_lcd_mgr_config *config)
-{
-	unsigned long flags;
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	if (mp->enabled) {
-		DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n",
-			mgr->name);
-		goto out;
-	}
-
-	dss_apply_mgr_lcd_config(mgr, config);
-out:
-	spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static int dss_ovl_set_info(struct omap_overlay *ovl,
-		struct omap_overlay_info *info)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	unsigned long flags;
-	int r;
-
-	r = dss_ovl_simple_check(ovl, info);
-	if (r)
-		return r;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	op->user_info = *info;
-	op->user_info_dirty = true;
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	return 0;
-}
-
-static void dss_ovl_get_info(struct omap_overlay *ovl,
-		struct omap_overlay_info *info)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	unsigned long flags;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	*info = op->user_info;
-
-	spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static int dss_ovl_set_manager(struct omap_overlay *ovl,
-		struct omap_overlay_manager *mgr)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	unsigned long flags;
-	int r;
-
-	if (!mgr)
-		return -EINVAL;
-
-	mutex_lock(&apply_lock);
-
-	if (ovl->manager) {
-		DSSERR("overlay '%s' already has a manager '%s'\n",
-				ovl->name, ovl->manager->name);
-		r = -EINVAL;
-		goto err;
-	}
-
-	r = dispc_runtime_get();
-	if (r)
-		goto err;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	if (op->enabled) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		DSSERR("overlay has to be disabled to change the manager\n");
-		r = -EINVAL;
-		goto err1;
-	}
-
-	dispc_ovl_set_channel_out(ovl->id, mgr->id);
-
-	ovl->manager = mgr;
-	list_add_tail(&ovl->list, &mgr->overlays);
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	dispc_runtime_put();
-
-	mutex_unlock(&apply_lock);
-
-	return 0;
-
-err1:
-	dispc_runtime_put();
-err:
-	mutex_unlock(&apply_lock);
-	return r;
-}
-
-static int dss_ovl_unset_manager(struct omap_overlay *ovl)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	unsigned long flags;
-	int r;
-
-	mutex_lock(&apply_lock);
-
-	if (!ovl->manager) {
-		DSSERR("failed to detach overlay: manager not set\n");
-		r = -EINVAL;
-		goto err;
-	}
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	if (op->enabled) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		DSSERR("overlay has to be disabled to unset the manager\n");
-		r = -EINVAL;
-		goto err;
-	}
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	/* wait for pending extra_info updates to ensure the ovl is disabled */
-	wait_pending_extra_info_updates();
-
-	/*
-	 * For a manual update display, there is no guarantee that the overlay
-	 * is really disabled in HW, we may need an extra update from this
-	 * manager before the configurations can go in. Return an error if the
-	 * overlay needed an update from the manager.
-	 *
-	 * TODO: Instead of returning an error, try to do a dummy manager update
-	 * here to disable the overlay in hardware. Use the *GATED fields in
-	 * the DISPC_CONFIG registers to do a dummy update.
-	 */
-	spin_lock_irqsave(&data_lock, flags);
-
-	if (ovl_manual_update(ovl) && op->extra_info_dirty) {
-		spin_unlock_irqrestore(&data_lock, flags);
-		DSSERR("need an update to change the manager\n");
-		r = -EINVAL;
-		goto err;
-	}
-
-	ovl->manager = NULL;
-	list_del(&ovl->list);
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	mutex_unlock(&apply_lock);
-
-	return 0;
-err:
-	mutex_unlock(&apply_lock);
-	return r;
-}
-
-static bool dss_ovl_is_enabled(struct omap_overlay *ovl)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	unsigned long flags;
-	bool e;
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	e = op->enabled;
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	return e;
-}
-
-static int dss_ovl_enable(struct omap_overlay *ovl)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	unsigned long flags;
-	int r;
-
-	mutex_lock(&apply_lock);
-
-	if (op->enabled) {
-		r = 0;
-		goto err1;
-	}
-
-	if (ovl->manager == NULL || ovl->manager->output == NULL) {
-		r = -EINVAL;
-		goto err1;
-	}
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	op->enabling = true;
-
-	r = dss_check_settings(ovl->manager);
-	if (r) {
-		DSSERR("failed to enable overlay %d: check_settings failed\n",
-				ovl->id);
-		goto err2;
-	}
-
-	dss_setup_fifos();
-
-	op->enabling = false;
-	dss_apply_ovl_enable(ovl, true);
-
-	dss_write_regs();
-	dss_set_go_bits();
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	mutex_unlock(&apply_lock);
-
-	return 0;
-err2:
-	op->enabling = false;
-	spin_unlock_irqrestore(&data_lock, flags);
-err1:
-	mutex_unlock(&apply_lock);
-	return r;
-}
-
-static int dss_ovl_disable(struct omap_overlay *ovl)
-{
-	struct ovl_priv_data *op = get_ovl_priv(ovl);
-	unsigned long flags;
-	int r;
-
-	mutex_lock(&apply_lock);
-
-	if (!op->enabled) {
-		r = 0;
-		goto err;
-	}
-
-	if (ovl->manager == NULL || ovl->manager->output == NULL) {
-		r = -EINVAL;
-		goto err;
-	}
-
-	spin_lock_irqsave(&data_lock, flags);
-
-	dss_apply_ovl_enable(ovl, false);
-	dss_write_regs();
-	dss_set_go_bits();
-
-	spin_unlock_irqrestore(&data_lock, flags);
-
-	mutex_unlock(&apply_lock);
-
-	return 0;
-
-err:
-	mutex_unlock(&apply_lock);
-	return r;
-}
-
-static int dss_mgr_register_framedone_handler_compat(struct omap_overlay_manager *mgr,
-		void (*handler)(void *), void *data)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
-	if (mp->framedone_handler)
-		return -EBUSY;
-
-	mp->framedone_handler = handler;
-	mp->framedone_handler_data = data;
-
-	return 0;
-}
-
-static void dss_mgr_unregister_framedone_handler_compat(struct omap_overlay_manager *mgr,
-		void (*handler)(void *), void *data)
-{
-	struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
-	WARN_ON(mp->framedone_handler != handler ||
-			mp->framedone_handler_data != data);
-
-	mp->framedone_handler = NULL;
-	mp->framedone_handler_data = NULL;
-}
-
-static const struct dss_mgr_ops apply_mgr_ops = {
-	.connect = dss_mgr_connect_compat,
-	.disconnect = dss_mgr_disconnect_compat,
-	.start_update = dss_mgr_start_update_compat,
-	.enable = dss_mgr_enable_compat,
-	.disable = dss_mgr_disable_compat,
-	.set_timings = dss_mgr_set_timings_compat,
-	.set_lcd_config = dss_mgr_set_lcd_config_compat,
-	.register_framedone_handler = dss_mgr_register_framedone_handler_compat,
-	.unregister_framedone_handler = dss_mgr_unregister_framedone_handler_compat,
-};
-
-static int compat_refcnt;
-static DEFINE_MUTEX(compat_init_lock);
-
-int omapdss_compat_init(void)
-{
-	struct platform_device *pdev = dss_get_core_pdev();
-	int i, r;
-
-	mutex_lock(&compat_init_lock);
-
-	if (compat_refcnt++ > 0)
-		goto out;
-
-	apply_init_priv();
-
-	dss_init_overlay_managers_sysfs(pdev);
-	dss_init_overlays(pdev);
-
-	for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
-		struct omap_overlay_manager *mgr;
-
-		mgr = omap_dss_get_overlay_manager(i);
-
-		mgr->set_output = &dss_mgr_set_output;
-		mgr->unset_output = &dss_mgr_unset_output;
-		mgr->apply = &omap_dss_mgr_apply;
-		mgr->set_manager_info = &dss_mgr_set_info;
-		mgr->get_manager_info = &dss_mgr_get_info;
-		mgr->wait_for_go = &dss_mgr_wait_for_go;
-		mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
-		mgr->get_device = &dss_mgr_get_device;
-	}
-
-	for (i = 0; i < omap_dss_get_num_overlays(); i++) {
-		struct omap_overlay *ovl = omap_dss_get_overlay(i);
-
-		ovl->is_enabled = &dss_ovl_is_enabled;
-		ovl->enable = &dss_ovl_enable;
-		ovl->disable = &dss_ovl_disable;
-		ovl->set_manager = &dss_ovl_set_manager;
-		ovl->unset_manager = &dss_ovl_unset_manager;
-		ovl->set_overlay_info = &dss_ovl_set_info;
-		ovl->get_overlay_info = &dss_ovl_get_info;
-		ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
-		ovl->get_device = &dss_ovl_get_device;
-	}
-
-	r = dss_install_mgr_ops(&apply_mgr_ops);
-	if (r)
-		goto err_mgr_ops;
-
-	r = display_init_sysfs(pdev);
-	if (r)
-		goto err_disp_sysfs;
-
-	dispc_runtime_get();
-
-	r = dss_dispc_initialize_irq();
-	if (r)
-		goto err_init_irq;
-
-	dispc_runtime_put();
-
-out:
-	mutex_unlock(&compat_init_lock);
-
-	return 0;
-
-err_init_irq:
-	dispc_runtime_put();
-	display_uninit_sysfs(pdev);
-
-err_disp_sysfs:
-	dss_uninstall_mgr_ops();
-
-err_mgr_ops:
-	dss_uninit_overlay_managers_sysfs(pdev);
-	dss_uninit_overlays(pdev);
-
-	compat_refcnt--;
-
-	mutex_unlock(&compat_init_lock);
-
-	return r;
-}
-EXPORT_SYMBOL(omapdss_compat_init);
-
-void omapdss_compat_uninit(void)
-{
-	struct platform_device *pdev = dss_get_core_pdev();
-
-	mutex_lock(&compat_init_lock);
-
-	if (--compat_refcnt > 0)
-		goto out;
-
-	dss_dispc_uninitialize_irq();
-
-	display_uninit_sysfs(pdev);
-
-	dss_uninstall_mgr_ops();
-
-	dss_uninit_overlay_managers_sysfs(pdev);
-	dss_uninit_overlays(pdev);
-out:
-	mutex_unlock(&compat_init_lock);
-}
-EXPORT_SYMBOL(omapdss_compat_uninit);
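
The 1702 lines deleted above are the omapdss compat apply layer, which omapdrm no longer uses. Its header comment described a four-level cache — user_info, info, shadow registers, hardware registers — gated by the DISPC GO bit. For readers who want that model without the full file, a condensed sketch (hypothetical names; the real logic lives in dss_write_regs(), dss_set_go_bits() and dss_apply_irq_handler() above):

	/* One manager's dirty-flag state, reduced to the essentials. */
	struct mgr_state {
		bool user_dirty;	/* set_info() updated user_info */
		bool info_dirty;	/* apply() latched user_info into info */
		bool shadow_dirty;	/* regs pushed, GO not yet consumed */
		bool busy;		/* GO bit is up; HW owns the shadow regs */
	};

	static void mgr_apply(struct mgr_state *s)
	{
		if (s->user_dirty) {		/* latch the user's settings */
			s->user_dirty = false;
			s->info_dirty = true;
		}
		/* Shadow registers are writable only while GO is down. */
		if (s->info_dirty && !s->busy) {
			/* write shadow registers here */
			s->info_dirty = false;
			s->shadow_dirty = true;
			s->busy = true;		/* raise GO; HW latches at vsync */
		}
	}

	static void mgr_vsync_isr(struct mgr_state *s)
	{
		if (s->busy) {		/* HW consumed the shadow regs */
			s->busy = false;
			s->shadow_dirty = false;
			mgr_apply(s);	/* flush anything queued meanwhile */
		}
	}
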
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 54eeb50..7e4e5be 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -165,32 +165,20 @@
 #endif /* CONFIG_OMAP2_DSS_DEBUGFS */
 
 /* PLATFORM DEVICE */
-static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+
+static void dss_disable_all_devices(void)
 {
-	DSSDBG("pm notif %lu\n", v);
+	struct omap_dss_device *dssdev = NULL;
 
-	switch (v) {
-	case PM_SUSPEND_PREPARE:
-	case PM_HIBERNATION_PREPARE:
-	case PM_RESTORE_PREPARE:
-		DSSDBG("suspending displays\n");
-		return dss_suspend_all_devices();
+	for_each_dss_dev(dssdev) {
+		if (!dssdev->driver)
+			continue;
 
-	case PM_POST_SUSPEND:
-	case PM_POST_HIBERNATION:
-	case PM_POST_RESTORE:
-		DSSDBG("resuming displays\n");
-		return dss_resume_all_devices();
-
-	default:
-		return 0;
+		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+			dssdev->driver->disable(dssdev);
 	}
 }
 
-static struct notifier_block omap_dss_pm_notif_block = {
-	.notifier_call = omap_dss_pm_notif,
-};
-
 static int __init omap_dss_probe(struct platform_device *pdev)
 {
 	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
@@ -211,8 +199,6 @@
 	else if (pdata->default_device)
 		core.default_display_name = pdata->default_device->name;
 
-	register_pm_notifier(&omap_dss_pm_notif_block);
-
 	return 0;
 
 err_debugfs:
@@ -222,8 +208,6 @@
 
 static int omap_dss_remove(struct platform_device *pdev)
 {
-	unregister_pm_notifier(&omap_dss_pm_notif_block);
-
 	dss_uninitialize_debugfs();
 
 	return 0;
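
The core.c hunks drop the system-wide PM notifier that suspended and resumed all displays around system sleep, and add a dss_disable_all_devices() helper that simply disables every active display through its driver. For reference, the removed mechanism was the generic PM-notifier pattern sketched below (hypothetical handler; how suspend is handled after this change is outside these hunks):

	#include <linux/notifier.h>
	#include <linux/suspend.h>

	static int foo_pm_notif(struct notifier_block *nb, unsigned long event,
			void *unused)
	{
		switch (event) {
		case PM_SUSPEND_PREPARE:
			/* quiesce the displays before the system sleeps */
			return NOTIFY_OK;
		case PM_POST_SUSPEND:
			/* bring them back after resume */
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

	static struct notifier_block foo_pm_nb = {
		.notifier_call = foo_pm_notif,
	};

	/* register_pm_notifier(&foo_pm_nb) at init,
	 * unregister_pm_notifier(&foo_pm_nb) at teardown. */
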
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc-compat.c b/drivers/gpu/drm/omapdrm/dss/dispc-compat.c
deleted file mode 100644
index 0918b3b..0000000
--- a/drivers/gpu/drm/omapdrm/dss/dispc-compat.c
+++ /dev/null
@@ -1,667 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "APPLY"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/seq_file.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-#include "dispc-compat.h"
-
-#define DISPC_IRQ_MASK_ERROR            (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
-					 DISPC_IRQ_OCP_ERR | \
-					 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
-					 DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
-					 DISPC_IRQ_SYNC_LOST | \
-					 DISPC_IRQ_SYNC_LOST_DIGIT)
-
-#define DISPC_MAX_NR_ISRS		8
-
-struct omap_dispc_isr_data {
-	omap_dispc_isr_t	isr;
-	void			*arg;
-	u32			mask;
-};
-
-struct dispc_irq_stats {
-	unsigned long last_reset;
-	unsigned irq_count;
-	unsigned irqs[32];
-};
-
-static struct {
-	spinlock_t irq_lock;
-	u32 irq_error_mask;
-	struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
-	u32 error_irqs;
-	struct work_struct error_work;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-	spinlock_t irq_stats_lock;
-	struct dispc_irq_stats irq_stats;
-#endif
-} dispc_compat;
-
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static void dispc_dump_irqs(struct seq_file *s)
-{
-	unsigned long flags;
-	struct dispc_irq_stats stats;
-
-	spin_lock_irqsave(&dispc_compat.irq_stats_lock, flags);
-
-	stats = dispc_compat.irq_stats;
-	memset(&dispc_compat.irq_stats, 0, sizeof(dispc_compat.irq_stats));
-	dispc_compat.irq_stats.last_reset = jiffies;
-
-	spin_unlock_irqrestore(&dispc_compat.irq_stats_lock, flags);
-
-	seq_printf(s, "period %u ms\n",
-			jiffies_to_msecs(jiffies - stats.last_reset));
-
-	seq_printf(s, "irqs %u\n", stats.irq_count);
-#define PIS(x) \
-	seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
-
-	PIS(FRAMEDONE);
-	PIS(VSYNC);
-	PIS(EVSYNC_EVEN);
-	PIS(EVSYNC_ODD);
-	PIS(ACBIAS_COUNT_STAT);
-	PIS(PROG_LINE_NUM);
-	PIS(GFX_FIFO_UNDERFLOW);
-	PIS(GFX_END_WIN);
-	PIS(PAL_GAMMA_MASK);
-	PIS(OCP_ERR);
-	PIS(VID1_FIFO_UNDERFLOW);
-	PIS(VID1_END_WIN);
-	PIS(VID2_FIFO_UNDERFLOW);
-	PIS(VID2_END_WIN);
-	if (dss_feat_get_num_ovls() > 3) {
-		PIS(VID3_FIFO_UNDERFLOW);
-		PIS(VID3_END_WIN);
-	}
-	PIS(SYNC_LOST);
-	PIS(SYNC_LOST_DIGIT);
-	PIS(WAKEUP);
-	if (dss_has_feature(FEAT_MGR_LCD2)) {
-		PIS(FRAMEDONE2);
-		PIS(VSYNC2);
-		PIS(ACBIAS_COUNT_STAT2);
-		PIS(SYNC_LOST2);
-	}
-	if (dss_has_feature(FEAT_MGR_LCD3)) {
-		PIS(FRAMEDONE3);
-		PIS(VSYNC3);
-		PIS(ACBIAS_COUNT_STAT3);
-		PIS(SYNC_LOST3);
-	}
-#undef PIS
-}
-#endif
-
-/* dispc.irq_lock has to be locked by the caller */
-static void _omap_dispc_set_irqs(void)
-{
-	u32 mask;
-	int i;
-	struct omap_dispc_isr_data *isr_data;
-
-	mask = dispc_compat.irq_error_mask;
-
-	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
-		isr_data = &dispc_compat.registered_isr[i];
-
-		if (isr_data->isr == NULL)
-			continue;
-
-		mask |= isr_data->mask;
-	}
-
-	dispc_write_irqenable(mask);
-}
-
-int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
-{
-	int i;
-	int ret;
-	unsigned long flags;
-	struct omap_dispc_isr_data *isr_data;
-
-	if (isr == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(&dispc_compat.irq_lock, flags);
-
-	/* check for duplicate entry */
-	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
-		isr_data = &dispc_compat.registered_isr[i];
-		if (isr_data->isr == isr && isr_data->arg == arg &&
-				isr_data->mask == mask) {
-			ret = -EINVAL;
-			goto err;
-		}
-	}
-
-	isr_data = NULL;
-	ret = -EBUSY;
-
-	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
-		isr_data = &dispc_compat.registered_isr[i];
-
-		if (isr_data->isr != NULL)
-			continue;
-
-		isr_data->isr = isr;
-		isr_data->arg = arg;
-		isr_data->mask = mask;
-		ret = 0;
-
-		break;
-	}
-
-	if (ret)
-		goto err;
-
-	_omap_dispc_set_irqs();
-
-	spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
-	return 0;
-err:
-	spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(omap_dispc_register_isr);
-
-int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
-{
-	int i;
-	unsigned long flags;
-	int ret = -EINVAL;
-	struct omap_dispc_isr_data *isr_data;
-
-	spin_lock_irqsave(&dispc_compat.irq_lock, flags);
-
-	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
-		isr_data = &dispc_compat.registered_isr[i];
-		if (isr_data->isr != isr || isr_data->arg != arg ||
-				isr_data->mask != mask)
-			continue;
-
-		/* found the correct isr */
-
-		isr_data->isr = NULL;
-		isr_data->arg = NULL;
-		isr_data->mask = 0;
-
-		ret = 0;
-		break;
-	}
-
-	if (ret == 0)
-		_omap_dispc_set_irqs();
-
-	spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(omap_dispc_unregister_isr);
-
-static void print_irq_status(u32 status)
-{
-	if ((status & dispc_compat.irq_error_mask) == 0)
-		return;
-
-#define PIS(x) (status & DISPC_IRQ_##x) ? (#x " ") : ""
-
-	pr_debug("DISPC IRQ: 0x%x: %s%s%s%s%s%s%s%s%s\n",
-		status,
-		PIS(OCP_ERR),
-		PIS(GFX_FIFO_UNDERFLOW),
-		PIS(VID1_FIFO_UNDERFLOW),
-		PIS(VID2_FIFO_UNDERFLOW),
-		dss_feat_get_num_ovls() > 3 ? PIS(VID3_FIFO_UNDERFLOW) : "",
-		PIS(SYNC_LOST),
-		PIS(SYNC_LOST_DIGIT),
-		dss_has_feature(FEAT_MGR_LCD2) ? PIS(SYNC_LOST2) : "",
-		dss_has_feature(FEAT_MGR_LCD3) ? PIS(SYNC_LOST3) : "");
-#undef PIS
-}
-
-/* Called from dss.c. We don't touch the clocks here, but we presume
- * they are on because we got an IRQ. However, an ISR may turn the
- * clocks off, so the clocks may no longer be running later in this
- * function. */
-static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
-{
-	int i;
-	u32 irqstatus, irqenable;
-	u32 handledirqs = 0;
-	u32 unhandled_errors;
-	struct omap_dispc_isr_data *isr_data;
-	struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
-
-	spin_lock(&dispc_compat.irq_lock);
-
-	irqstatus = dispc_read_irqstatus();
-	irqenable = dispc_read_irqenable();
-
-	/* IRQ is not for us */
-	if (!(irqstatus & irqenable)) {
-		spin_unlock(&dispc_compat.irq_lock);
-		return IRQ_NONE;
-	}
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-	spin_lock(&dispc_compat.irq_stats_lock);
-	dispc_compat.irq_stats.irq_count++;
-	dss_collect_irq_stats(irqstatus, dispc_compat.irq_stats.irqs);
-	spin_unlock(&dispc_compat.irq_stats_lock);
-#endif
-
-	print_irq_status(irqstatus);
-
-	/* Ack the interrupt. Do it here before clocks are possibly turned
-	 * off */
-	dispc_clear_irqstatus(irqstatus);
-	/* flush posted write */
-	dispc_read_irqstatus();
-
-	/* make a copy and unlock, so that isrs can unregister
-	 * themselves */
-	memcpy(registered_isr, dispc_compat.registered_isr,
-			sizeof(registered_isr));
-
-	spin_unlock(&dispc_compat.irq_lock);
-
-	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
-		isr_data = &registered_isr[i];
-
-		if (!isr_data->isr)
-			continue;
-
-		if (isr_data->mask & irqstatus) {
-			isr_data->isr(isr_data->arg, irqstatus);
-			handledirqs |= isr_data->mask;
-		}
-	}
-
-	spin_lock(&dispc_compat.irq_lock);
-
-	unhandled_errors = irqstatus & ~handledirqs & dispc_compat.irq_error_mask;
-
-	if (unhandled_errors) {
-		dispc_compat.error_irqs |= unhandled_errors;
-
-		dispc_compat.irq_error_mask &= ~unhandled_errors;
-		_omap_dispc_set_irqs();
-
-		schedule_work(&dispc_compat.error_work);
-	}
-
-	spin_unlock(&dispc_compat.irq_lock);
-
-	return IRQ_HANDLED;
-}
-
-static void dispc_error_worker(struct work_struct *work)
-{
-	int i;
-	u32 errors;
-	unsigned long flags;
-	static const unsigned fifo_underflow_bits[] = {
-		DISPC_IRQ_GFX_FIFO_UNDERFLOW,
-		DISPC_IRQ_VID1_FIFO_UNDERFLOW,
-		DISPC_IRQ_VID2_FIFO_UNDERFLOW,
-		DISPC_IRQ_VID3_FIFO_UNDERFLOW,
-	};
-
-	spin_lock_irqsave(&dispc_compat.irq_lock, flags);
-	errors = dispc_compat.error_irqs;
-	dispc_compat.error_irqs = 0;
-	spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
-	dispc_runtime_get();
-
-	for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
-		struct omap_overlay *ovl;
-		unsigned bit;
-
-		ovl = omap_dss_get_overlay(i);
-		bit = fifo_underflow_bits[i];
-
-		if (bit & errors) {
-			DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
-					ovl->name);
-			ovl->disable(ovl);
-			msleep(50);
-		}
-	}
-
-	for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
-		struct omap_overlay_manager *mgr;
-		unsigned bit;
-
-		mgr = omap_dss_get_overlay_manager(i);
-		bit = dispc_mgr_get_sync_lost_irq(i);
-
-		if (bit & errors) {
-			int j;
-
-			DSSERR("SYNC_LOST on channel %s, restarting the output "
-					"with video overlays disabled\n",
-					mgr->name);
-
-			dss_mgr_disable(mgr);
-
-			for (j = 0; j < omap_dss_get_num_overlays(); ++j) {
-				struct omap_overlay *ovl;
-				ovl = omap_dss_get_overlay(j);
-
-				if (ovl->id != OMAP_DSS_GFX &&
-						ovl->manager == mgr)
-					ovl->disable(ovl);
-			}
-
-			dss_mgr_enable(mgr);
-		}
-	}
-
-	if (errors & DISPC_IRQ_OCP_ERR) {
-		DSSERR("OCP_ERR\n");
-		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
-			struct omap_overlay_manager *mgr;
-
-			mgr = omap_dss_get_overlay_manager(i);
-			dss_mgr_disable(mgr);
-		}
-	}
-
-	spin_lock_irqsave(&dispc_compat.irq_lock, flags);
-	dispc_compat.irq_error_mask |= errors;
-	_omap_dispc_set_irqs();
-	spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
-	dispc_runtime_put();
-}
-
-int dss_dispc_initialize_irq(void)
-{
-	int r;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-	spin_lock_init(&dispc_compat.irq_stats_lock);
-	dispc_compat.irq_stats.last_reset = jiffies;
-	dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
-#endif
-
-	spin_lock_init(&dispc_compat.irq_lock);
-
-	memset(dispc_compat.registered_isr, 0,
-			sizeof(dispc_compat.registered_isr));
-
-	dispc_compat.irq_error_mask = DISPC_IRQ_MASK_ERROR;
-	if (dss_has_feature(FEAT_MGR_LCD2))
-		dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
-	if (dss_has_feature(FEAT_MGR_LCD3))
-		dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST3;
-	if (dss_feat_get_num_ovls() > 3)
-		dispc_compat.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
-
-	/*
-	 * a SYNC_LOST_DIGIT interrupt is pending after enabling the DSS,
-	 * so clear it
-	 */
-	dispc_clear_irqstatus(dispc_read_irqstatus());
-
-	INIT_WORK(&dispc_compat.error_work, dispc_error_worker);
-
-	_omap_dispc_set_irqs();
-
-	r = dispc_request_irq(omap_dispc_irq_handler, &dispc_compat);
-	if (r) {
-		DSSERR("dispc_request_irq failed\n");
-		return r;
-	}
-
-	return 0;
-}
-
-void dss_dispc_uninitialize_irq(void)
-{
-	dispc_free_irq(&dispc_compat);
-}
-
-static void dispc_mgr_disable_isr(void *data, u32 mask)
-{
-	struct completion *compl = data;
-	complete(compl);
-}
-
-static void dispc_mgr_enable_lcd_out(enum omap_channel channel)
-{
-	dispc_mgr_enable(channel, true);
-}
-
-static void dispc_mgr_disable_lcd_out(enum omap_channel channel)
-{
-	DECLARE_COMPLETION_ONSTACK(framedone_compl);
-	int r;
-	u32 irq;
-
-	if (!dispc_mgr_is_enabled(channel))
-		return;
-
-	/*
-	 * When we disable LCD output, we need to wait for FRAMEDONE to know
-	 * that DISPC has finished with the LCD output.
-	 */
-
-	irq = dispc_mgr_get_framedone_irq(channel);
-
-	r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
-			irq);
-	if (r)
-		DSSERR("failed to register FRAMEDONE isr\n");
-
-	dispc_mgr_enable(channel, false);
-
-	/* if we couldn't register for framedone, just sleep and exit */
-	if (r) {
-		msleep(100);
-		return;
-	}
-
-	if (!wait_for_completion_timeout(&framedone_compl,
-				msecs_to_jiffies(100)))
-		DSSERR("timeout waiting for FRAMEDONE\n");
-
-	r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
-			irq);
-	if (r)
-		DSSERR("failed to unregister FRAMEDONE isr\n");
-}
-
-static void dispc_digit_out_enable_isr(void *data, u32 mask)
-{
-	struct completion *compl = data;
-
-	/* ignore any sync lost interrupts */
-	if (mask & (DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD))
-		complete(compl);
-}
-
-static void dispc_mgr_enable_digit_out(void)
-{
-	DECLARE_COMPLETION_ONSTACK(vsync_compl);
-	int r;
-	u32 irq_mask;
-
-	if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT))
-		return;
-
-	/*
-	 * Digit output produces some sync lost interrupts during the first
-	 * frame when enabling. Those need to be ignored, so we register for the
-	 * sync lost irq to prevent the error handler from triggering.
-	 */
-
-	irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT) |
-		dispc_mgr_get_sync_lost_irq(OMAP_DSS_CHANNEL_DIGIT);
-
-	r = omap_dispc_register_isr(dispc_digit_out_enable_isr, &vsync_compl,
-			irq_mask);
-	if (r) {
-		DSSERR("failed to register %x isr\n", irq_mask);
-		return;
-	}
-
-	dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, true);
-
-	/* wait for the first evsync */
-	if (!wait_for_completion_timeout(&vsync_compl, msecs_to_jiffies(100)))
-		DSSERR("timeout waiting for digit out to start\n");
-
-	r = omap_dispc_unregister_isr(dispc_digit_out_enable_isr, &vsync_compl,
-			irq_mask);
-	if (r)
-		DSSERR("failed to unregister %x isr\n", irq_mask);
-}
-
-static void dispc_mgr_disable_digit_out(void)
-{
-	DECLARE_COMPLETION_ONSTACK(framedone_compl);
-	int r, i;
-	u32 irq_mask;
-	int num_irqs;
-
-	if (!dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT))
-		return;
-
-	/*
-	 * When we disable the digit output, we need to wait for FRAMEDONE to
-	 * know that DISPC has finished with the output.
-	 */
-
-	irq_mask = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_DIGIT);
-	num_irqs = 1;
-
-	if (!irq_mask) {
-		/*
-		 * omap 2/3 don't have framedone irq for TV, so we need to use
-		 * vsyncs for this.
-		 */
-
-		irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT);
-		/*
-		 * We need to wait for both even and odd vsyncs. Note that this
-		 * is not totally reliable, as we could get a vsync interrupt
-		 * before we disable the output, which leads to timeout in the
-		 * wait_for_completion.
-		 */
-		num_irqs = 2;
-	}
-
-	r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
-			irq_mask);
-	if (r)
-		DSSERR("failed to register %x isr\n", irq_mask);
-
-	dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, false);
-
-	/* if we couldn't register the irq, just sleep and exit */
-	if (r) {
-		msleep(100);
-		return;
-	}
-
-	for (i = 0; i < num_irqs; ++i) {
-		if (!wait_for_completion_timeout(&framedone_compl,
-					msecs_to_jiffies(100)))
-			DSSERR("timeout waiting for digit out to stop\n");
-	}
-
-	r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
-			irq_mask);
-	if (r)
-		DSSERR("failed to unregister %x isr\n", irq_mask);
-}
-
-void dispc_mgr_enable_sync(enum omap_channel channel)
-{
-	if (dss_mgr_is_lcd(channel))
-		dispc_mgr_enable_lcd_out(channel);
-	else if (channel == OMAP_DSS_CHANNEL_DIGIT)
-		dispc_mgr_enable_digit_out();
-	else
-		WARN_ON(1);
-}
-
-void dispc_mgr_disable_sync(enum omap_channel channel)
-{
-	if (dss_mgr_is_lcd(channel))
-		dispc_mgr_disable_lcd_out(channel);
-	else if (channel == OMAP_DSS_CHANNEL_DIGIT)
-		dispc_mgr_disable_digit_out();
-	else
-		WARN_ON(1);
-}
-
-static inline void dispc_irq_wait_handler(void *data, u32 mask)
-{
-	complete((struct completion *)data);
-}
-
-int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
-		unsigned long timeout)
-{
-
-	int r;
-	DECLARE_COMPLETION_ONSTACK(completion);
-
-	r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
-			irqmask);
-
-	if (r)
-		return r;
-
-	timeout = wait_for_completion_interruptible_timeout(&completion,
-			timeout);
-
-	omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
-
-	if (timeout == 0)
-		return -ETIMEDOUT;
-
-	if (timeout == -ERESTARTSYS)
-		return -ERESTARTSYS;
-
-	return 0;
-}
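
The deleted IRQ code above relies on one subtle trick worth noting: the handler
copies the registered-ISR table under the spinlock and then dispatches from the
copy with the lock released, so an ISR may re-take the lock and unregister
itself. A self-contained user-space sketch of that snapshot-then-dispatch
pattern (pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <string.h>

#define MAX_ISRS 8

typedef void (*isr_t)(void *arg, unsigned int status);

struct isr_slot {
	isr_t isr;
	void *arg;
	unsigned int mask;
};

static pthread_mutex_t isr_lock = PTHREAD_MUTEX_INITIALIZER;
static struct isr_slot table[MAX_ISRS];

void dispatch(unsigned int status)
{
	struct isr_slot snap[MAX_ISRS];
	int i;

	/* snapshot the table under the lock... */
	pthread_mutex_lock(&isr_lock);
	memcpy(snap, table, sizeof(snap));
	pthread_mutex_unlock(&isr_lock);

	/* ...then dispatch unlocked, so a handler may safely take the
	 * lock again and unregister itself */
	for (i = 0; i < MAX_ISRS; i++)
		if (snap[i].isr && (snap[i].mask & status))
			snap[i].isr(snap[i].arg, status);
}
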
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc-compat.h b/drivers/gpu/drm/omapdrm/dss/dispc-compat.h
deleted file mode 100644
index 14a69b3..0000000
--- a/drivers/gpu/drm/omapdrm/dss/dispc-compat.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP2_DSS_DISPC_COMPAT_H
-#define __OMAP2_DSS_DISPC_COMPAT_H
-
-void dispc_mgr_enable_sync(enum omap_channel channel);
-void dispc_mgr_disable_sync(enum omap_channel channel);
-
-int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
-		unsigned long timeout);
-
-int dss_dispc_initialize_irq(void);
-void dss_dispc_uninitialize_irq(void);
-
-#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 6b50476..f83608b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -104,6 +104,15 @@
 	bool supports_sync_align:1;
 
 	bool has_writeback:1;
+
+	bool supports_double_pixel:1;
+
+	/*
+	 * The field order for VENC differs from that of HDMI. We should
+	 * handle this more intelligently, but as each SoC has either HDMI
+	 * or VENC, never both, we can just use this flag for now.
+	 */
+	bool reverse_ilace_field_order:1;
 };
 
 #define DISPC_MAX_NR_FIFOS 5
@@ -2552,47 +2561,6 @@
 	return 0;
 }
 
-int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
-		const struct omap_overlay_info *oi,
-		const struct omap_video_timings *timings,
-		int *x_predecim, int *y_predecim)
-{
-	enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
-	bool five_taps = true;
-	bool fieldmode = false;
-	u16 in_height = oi->height;
-	u16 in_width = oi->width;
-	bool ilace = timings->interlace;
-	u16 out_width, out_height;
-	int pos_x = oi->pos_x;
-	unsigned long pclk = dispc_mgr_pclk_rate(channel);
-	unsigned long lclk = dispc_mgr_lclk_rate(channel);
-
-	out_width = oi->out_width == 0 ? oi->width : oi->out_width;
-	out_height = oi->out_height == 0 ? oi->height : oi->out_height;
-
-	if (ilace && oi->height == out_height)
-		fieldmode = true;
-
-	if (ilace) {
-		if (fieldmode)
-			in_height /= 2;
-		out_height /= 2;
-
-		DSSDBG("adjusting for ilace: height %d, out_height %d\n",
-				in_height, out_height);
-	}
-
-	if (!dss_feat_color_mode_supported(plane, oi->color_mode))
-		return -EINVAL;
-
-	return dispc_ovl_calc_scaling(pclk, lclk, caps, timings, in_width,
-			in_height, out_width, out_height, oi->color_mode,
-			&five_taps, x_predecim, y_predecim, pos_x,
-			oi->rotation_type, false);
-}
-EXPORT_SYMBOL(dispc_ovl_check);
-
 static int dispc_ovl_setup_common(enum omap_plane plane,
 		enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr,
 		u16 screen_width, int pos_x, int pos_y, u16 width, u16 height,
@@ -2747,6 +2715,9 @@
 
 	dispc_ovl_configure_burst_type(plane, rotation_type);
 
+	if (dispc.feat->reverse_ilace_field_order)
+		swap(offset0, offset1);
+
 	dispc_ovl_set_ba0(plane, paddr + offset0);
 	dispc_ovl_set_ba1(plane, paddr + offset1);
 
@@ -2898,6 +2869,12 @@
 }
 EXPORT_SYMBOL(dispc_ovl_enabled);
 
+enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel)
+{
+	return dss_feat_get_supported_outputs(channel);
+}
+EXPORT_SYMBOL(dispc_mgr_get_supported_outputs);
+
 void dispc_mgr_enable(enum omap_channel channel, bool enable)
 {
 	mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
@@ -3287,6 +3264,10 @@
 	} else {
 		if (t.interlace)
 			t.y_res /= 2;
+
+		if (dispc.feat->supports_double_pixel)
+			REG_FLD_MOD(DISPC_CONTROL, t.double_pixel ? 1 : 0,
+				19, 17);
 	}
 
 	dispc_mgr_set_size(channel, t.x_res, t.y_res);
@@ -3951,6 +3932,8 @@
 	.set_max_preload	=	true,
 	.supports_sync_align	=	true,
 	.has_writeback		=	true,
+	.supports_double_pixel	=	true,
+	.reverse_ilace_field_order =	true,
 };
 
 static const struct dispc_features omap54xx_dispc_feats = {
@@ -3974,6 +3957,8 @@
 	.set_max_preload	=	true,
 	.supports_sync_align	=	true,
 	.has_writeback		=	true,
+	.supports_double_pixel	=	true,
+	.reverse_ilace_field_order =	true,
 };
 
 static int dispc_init_features(struct platform_device *pdev)
@@ -4129,8 +4114,6 @@
 
 	dispc_runtime_put();
 
-	dss_init_overlay_managers();
-
 	dss_debugfs_create_file("dispc", dispc_dump_regs);
 
 	return 0;
@@ -4144,8 +4127,6 @@
 			       void *data)
 {
 	pm_runtime_disable(dev);
-
-	dss_uninit_overlay_managers();
 }
 
 static const struct component_ops dispc_component_ops = {
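
The two new capability bits follow the driver's existing pattern: a per-SoC
dispc_features structure selected at probe time and consulted wherever behaviour
diverges. A compilable sketch of the idiom, trimmed to the new flags (names and
values illustrative, mirroring the hunks above):

#include <stdbool.h>
#include <stdio.h>

struct features {
	bool supports_double_pixel:1;
	bool reverse_ilace_field_order:1;
};

/* one table entry per SoC generation, chosen once at probe */
static const struct features omap44xx_feats = {
	.supports_double_pixel = true,
	.reverse_ilace_field_order = true,
};

static const struct features *feat = &omap44xx_feats;

static void fixup_field_offsets(unsigned int *offset0, unsigned int *offset1)
{
	/* VENC scans the fields in the opposite order to HDMI */
	if (feat->reverse_ilace_field_order) {
		unsigned int tmp = *offset0;
		*offset0 = *offset1;
		*offset1 = tmp;
	}
}

int main(void)
{
	unsigned int o0 = 0, o1 = 4096;

	fixup_field_offsets(&o0, &o1);
	printf("ba0 offset %u, ba1 offset %u\n", o0, o1);
	return 0;
}
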
diff --git a/drivers/gpu/drm/omapdrm/dss/display-sysfs.c b/drivers/gpu/drm/omapdrm/dss/display-sysfs.c
deleted file mode 100644
index 6ad0991..0000000
--- a/drivers/gpu/drm/omapdrm/dss/display-sysfs.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "DISPLAY"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sysfs.h>
-
-#include <video/omapdss.h>
-#include "dss.h"
-
-static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			dssdev->name ?
-			dssdev->name : "");
-}
-
-static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			omapdss_device_is_enabled(dssdev));
-}
-
-static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
-		const char *buf, size_t size)
-{
-	int r;
-	bool enable;
-
-	r = strtobool(buf, &enable);
-	if (r)
-		return r;
-
-	if (enable == omapdss_device_is_enabled(dssdev))
-		return size;
-
-	if (omapdss_device_is_connected(dssdev) == false)
-		return -ENODEV;
-
-	if (enable) {
-		r = dssdev->driver->enable(dssdev);
-		if (r)
-			return r;
-	} else {
-		dssdev->driver->disable(dssdev);
-	}
-
-	return size;
-}
-
-static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			dssdev->driver->get_te ?
-			dssdev->driver->get_te(dssdev) : 0);
-}
-
-static ssize_t display_tear_store(struct omap_dss_device *dssdev,
-	const char *buf, size_t size)
-{
-	int r;
-	bool te;
-
-	if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
-		return -ENOENT;
-
-	r = strtobool(buf, &te);
-	if (r)
-		return r;
-
-	r = dssdev->driver->enable_te(dssdev, te);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf)
-{
-	struct omap_video_timings t;
-
-	if (!dssdev->driver->get_timings)
-		return -ENOENT;
-
-	dssdev->driver->get_timings(dssdev, &t);
-
-	return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
-			t.pixelclock,
-			t.x_res, t.hfp, t.hbp, t.hsw,
-			t.y_res, t.vfp, t.vbp, t.vsw);
-}
-
-static ssize_t display_timings_store(struct omap_dss_device *dssdev,
-	const char *buf, size_t size)
-{
-	struct omap_video_timings t = dssdev->panel.timings;
-	int r, found;
-
-	if (!dssdev->driver->set_timings || !dssdev->driver->check_timings)
-		return -ENOENT;
-
-	found = 0;
-#ifdef CONFIG_OMAP2_DSS_VENC
-	if (strncmp("pal", buf, 3) == 0) {
-		t = omap_dss_pal_timings;
-		found = 1;
-	} else if (strncmp("ntsc", buf, 4) == 0) {
-		t = omap_dss_ntsc_timings;
-		found = 1;
-	}
-#endif
-	if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
-				&t.pixelclock,
-				&t.x_res, &t.hfp, &t.hbp, &t.hsw,
-				&t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
-		return -EINVAL;
-
-	r = dssdev->driver->check_timings(dssdev, &t);
-	if (r)
-		return r;
-
-	dssdev->driver->disable(dssdev);
-	dssdev->driver->set_timings(dssdev, &t);
-	r = dssdev->driver->enable(dssdev);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf)
-{
-	int rotate;
-	if (!dssdev->driver->get_rotate)
-		return -ENOENT;
-	rotate = dssdev->driver->get_rotate(dssdev);
-	return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
-}
-
-static ssize_t display_rotate_store(struct omap_dss_device *dssdev,
-	const char *buf, size_t size)
-{
-	int rot, r;
-
-	if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
-		return -ENOENT;
-
-	r = kstrtoint(buf, 0, &rot);
-	if (r)
-		return r;
-
-	r = dssdev->driver->set_rotate(dssdev, rot);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf)
-{
-	int mirror;
-	if (!dssdev->driver->get_mirror)
-		return -ENOENT;
-	mirror = dssdev->driver->get_mirror(dssdev);
-	return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
-}
-
-static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
-	const char *buf, size_t size)
-{
-	int r;
-	bool mirror;
-
-	if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
-		return -ENOENT;
-
-	r = strtobool(buf, &mirror);
-	if (r)
-		return r;
-
-	r = dssdev->driver->set_mirror(dssdev, mirror);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf)
-{
-	unsigned int wss;
-
-	if (!dssdev->driver->get_wss)
-		return -ENOENT;
-
-	wss = dssdev->driver->get_wss(dssdev);
-
-	return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
-}
-
-static ssize_t display_wss_store(struct omap_dss_device *dssdev,
-	const char *buf, size_t size)
-{
-	u32 wss;
-	int r;
-
-	if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
-		return -ENOENT;
-
-	r = kstrtou32(buf, 0, &wss);
-	if (r)
-		return r;
-
-	if (wss > 0xfffff)
-		return -EINVAL;
-
-	r = dssdev->driver->set_wss(dssdev, wss);
-	if (r)
-		return r;
-
-	return size;
-}
-
-struct display_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct omap_dss_device *, char *);
-	ssize_t	(*store)(struct omap_dss_device *, const char *, size_t);
-};
-
-#define DISPLAY_ATTR(_name, _mode, _show, _store) \
-	struct display_attribute display_attr_##_name = \
-	__ATTR(_name, _mode, _show, _store)
-
-static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL);
-static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL);
-static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
-		display_enabled_show, display_enabled_store);
-static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR,
-		display_tear_show, display_tear_store);
-static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR,
-		display_timings_show, display_timings_store);
-static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR,
-		display_rotate_show, display_rotate_store);
-static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR,
-		display_mirror_show, display_mirror_store);
-static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR,
-		display_wss_show, display_wss_store);
-
-static struct attribute *display_sysfs_attrs[] = {
-	&display_attr_name.attr,
-	&display_attr_display_name.attr,
-	&display_attr_enabled.attr,
-	&display_attr_tear_elim.attr,
-	&display_attr_timings.attr,
-	&display_attr_rotate.attr,
-	&display_attr_mirror.attr,
-	&display_attr_wss.attr,
-	NULL
-};
-
-static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
-		char *buf)
-{
-	struct omap_dss_device *dssdev;
-	struct display_attribute *display_attr;
-
-	dssdev = container_of(kobj, struct omap_dss_device, kobj);
-	display_attr = container_of(attr, struct display_attribute, attr);
-
-	if (!display_attr->show)
-		return -ENOENT;
-
-	return display_attr->show(dssdev, buf);
-}
-
-static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr,
-		const char *buf, size_t size)
-{
-	struct omap_dss_device *dssdev;
-	struct display_attribute *display_attr;
-
-	dssdev = container_of(kobj, struct omap_dss_device, kobj);
-	display_attr = container_of(attr, struct display_attribute, attr);
-
-	if (!display_attr->store)
-		return -ENOENT;
-
-	return display_attr->store(dssdev, buf, size);
-}
-
-static const struct sysfs_ops display_sysfs_ops = {
-	.show = display_attr_show,
-	.store = display_attr_store,
-};
-
-static struct kobj_type display_ktype = {
-	.sysfs_ops = &display_sysfs_ops,
-	.default_attrs = display_sysfs_attrs,
-};
-
-int display_init_sysfs(struct platform_device *pdev)
-{
-	struct omap_dss_device *dssdev = NULL;
-	int r;
-
-	for_each_dss_dev(dssdev) {
-		r = kobject_init_and_add(&dssdev->kobj, &display_ktype,
-			&pdev->dev.kobj, "%s", dssdev->alias);
-		if (r) {
-			DSSERR("failed to create sysfs files\n");
-			omap_dss_put_device(dssdev);
-			goto err;
-		}
-	}
-
-	return 0;
-
-err:
-	display_uninit_sysfs(pdev);
-
-	return r;
-}
-
-void display_uninit_sysfs(struct platform_device *pdev)
-{
-	struct omap_dss_device *dssdev = NULL;
-
-	for_each_dss_dev(dssdev) {
-		if (kobject_name(&dssdev->kobj) == NULL)
-			continue;
-
-		kobject_del(&dssdev->kobj);
-		kobject_put(&dssdev->kobj);
-
-		memset(&dssdev->kobj, 0, sizeof(dssdev->kobj));
-	}
-}
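
The deleted sysfs code is a textbook use of the kobject attribute pattern: a
wrapper attribute embeds typed show/store callbacks, and the generic sysfs_ops
recover both the device and the wrapper via container_of. The skeleton, reduced
to a hypothetical my_device (only the show path shown; store mirrors it):

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/errno.h>

struct my_device {
	struct kobject kobj;
	/* ...device state... */
};

struct my_attribute {
	struct attribute attr;
	ssize_t (*show)(struct my_device *, char *);
	ssize_t (*store)(struct my_device *, const char *, size_t);
};

static ssize_t my_attr_show(struct kobject *kobj, struct attribute *attr,
		char *buf)
{
	/* recover the containing objects from the embedded members */
	struct my_device *mdev = container_of(kobj, struct my_device, kobj);
	struct my_attribute *ma = container_of(attr, struct my_attribute, attr);

	if (!ma->show)
		return -ENOENT;

	return ma->show(mdev, buf);
}

static const struct sysfs_ops my_sysfs_ops = {
	.show = my_attr_show,
	/* .store follows the same container_of pattern */
};
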
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index ef5b902..9f3dd09b 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -78,55 +78,6 @@
 }
 EXPORT_SYMBOL(omapdss_default_get_timings);
 
-int dss_suspend_all_devices(void)
-{
-	struct omap_dss_device *dssdev = NULL;
-
-	for_each_dss_dev(dssdev) {
-		if (!dssdev->driver)
-			continue;
-
-		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
-			dssdev->driver->disable(dssdev);
-			dssdev->activate_after_resume = true;
-		} else {
-			dssdev->activate_after_resume = false;
-		}
-	}
-
-	return 0;
-}
-
-int dss_resume_all_devices(void)
-{
-	struct omap_dss_device *dssdev = NULL;
-
-	for_each_dss_dev(dssdev) {
-		if (!dssdev->driver)
-			continue;
-
-		if (dssdev->activate_after_resume) {
-			dssdev->driver->enable(dssdev);
-			dssdev->activate_after_resume = false;
-		}
-	}
-
-	return 0;
-}
-
-void dss_disable_all_devices(void)
-{
-	struct omap_dss_device *dssdev = NULL;
-
-	for_each_dss_dev(dssdev) {
-		if (!dssdev->driver)
-			continue;
-
-		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
-			dssdev->driver->disable(dssdev);
-	}
-}
-
 static LIST_HEAD(panel_list);
 static DEFINE_MUTEX(panel_list_mutex);
 static int disp_num_counter;
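
The removed suspend path recorded which displays were active so that resume
could re-enable exactly those and no others. The bookkeeping in isolation, as a
compilable sketch with the dssdev list reduced to a plain array:

#include <stdbool.h>
#include <stddef.h>

struct display {
	bool active;
	bool activate_after_resume;
	void (*enable)(struct display *);
	void (*disable)(struct display *);
};

static void suspend_all(struct display *devs, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		/* remember whether this one must come back on resume */
		devs[i].activate_after_resume = devs[i].active;
		if (devs[i].active)
			devs[i].disable(&devs[i]);
	}
}

static void resume_all(struct display *devs, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (devs[i].activate_after_resume) {
			devs[i].enable(&devs[i]);
			devs[i].activate_after_resume = false;
		}
	}
}
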
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 7953e6a..97ea602 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -334,7 +334,7 @@
 static int dpi_set_mode(struct dpi_data *dpi)
 {
 	struct omap_dss_device *out = &dpi->output;
-	struct omap_overlay_manager *mgr = out->manager;
+	enum omap_channel channel = out->dispc_channel;
 	struct omap_video_timings *t = &dpi->timings;
 	int lck_div = 0, pck_div = 0;
 	unsigned long fck = 0;
@@ -342,7 +342,7 @@
 	int r = 0;
 
 	if (dpi->pll)
-		r = dpi_set_dsi_clk(dpi, mgr->id, t->pixelclock, &fck,
+		r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck,
 				&lck_div, &pck_div);
 	else
 		r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
@@ -359,7 +359,7 @@
 		t->pixelclock = pck;
 	}
 
-	dss_mgr_set_timings(mgr, t);
+	dss_mgr_set_timings(channel, t);
 
 	return 0;
 }
@@ -367,7 +367,7 @@
 static void dpi_config_lcd_manager(struct dpi_data *dpi)
 {
 	struct omap_dss_device *out = &dpi->output;
-	struct omap_overlay_manager *mgr = out->manager;
+	enum omap_channel channel = out->dispc_channel;
 
 	dpi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
 
@@ -378,13 +378,14 @@
 
 	dpi->mgr_config.lcden_sig_polarity = 0;
 
-	dss_mgr_set_lcd_config(mgr, &dpi->mgr_config);
+	dss_mgr_set_lcd_config(channel, &dpi->mgr_config);
 }
 
 static int dpi_display_enable(struct omap_dss_device *dssdev)
 {
 	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
 	struct omap_dss_device *out = &dpi->output;
+	enum omap_channel channel = out->dispc_channel;
 	int r;
 
 	mutex_lock(&dpi->lock);
@@ -395,7 +396,7 @@
 		goto err_no_reg;
 	}
 
-	if (out->manager == NULL) {
+	if (!out->dispc_channel_connected) {
 		DSSERR("failed to enable display: no output/manager\n");
 		r = -ENODEV;
 		goto err_no_out_mgr;
@@ -411,7 +412,7 @@
 	if (r)
 		goto err_get_dispc;
 
-	r = dss_dpi_select_source(out->port_num, out->manager->id);
+	r = dss_dpi_select_source(out->port_num, channel);
 	if (r)
 		goto err_src_sel;
 
@@ -429,7 +430,7 @@
 
 	mdelay(2);
 
-	r = dss_mgr_enable(out->manager);
+	r = dss_mgr_enable(channel);
 	if (r)
 		goto err_mgr_enable;
 
@@ -457,14 +458,14 @@
 static void dpi_display_disable(struct omap_dss_device *dssdev)
 {
 	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-	struct omap_overlay_manager *mgr = dpi->output.manager;
+	enum omap_channel channel = dpi->output.dispc_channel;
 
 	mutex_lock(&dpi->lock);
 
-	dss_mgr_disable(mgr);
+	dss_mgr_disable(channel);
 
 	if (dpi->pll) {
-		dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
+		dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
 		dss_pll_disable(dpi->pll);
 	}
 
@@ -506,14 +507,17 @@
 			struct omap_video_timings *timings)
 {
 	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-	struct omap_overlay_manager *mgr = dpi->output.manager;
+	enum omap_channel channel = dpi->output.dispc_channel;
 	int lck_div, pck_div;
 	unsigned long fck;
 	unsigned long pck;
 	struct dpi_clk_calc_ctx ctx;
 	bool ok;
 
-	if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
+	if (timings->x_res % 8 != 0)
+		return -EINVAL;
+
+	if (!dispc_mgr_timings_ok(channel, timings))
 		return -EINVAL;
 
 	if (timings->pixelclock == 0)
@@ -660,7 +664,7 @@
 		struct omap_dss_device *dst)
 {
 	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-	struct omap_overlay_manager *mgr;
+	enum omap_channel channel = dpi->output.dispc_channel;
 	int r;
 
 	r = dpi_init_regulator(dpi);
@@ -669,11 +673,7 @@
 
 	dpi_init_pll(dpi);
 
-	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
-	if (!mgr)
-		return -ENODEV;
-
-	r = dss_mgr_connect(mgr, dssdev);
+	r = dss_mgr_connect(channel, dssdev);
 	if (r)
 		return r;
 
@@ -681,7 +681,7 @@
 	if (r) {
 		DSSERR("failed to connect output to new device: %s\n",
 				dst->name);
-		dss_mgr_disconnect(mgr, dssdev);
+		dss_mgr_disconnect(channel, dssdev);
 		return r;
 	}
 
@@ -691,6 +691,9 @@
 static void dpi_disconnect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
+	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+	enum omap_channel channel = dpi->output.dispc_channel;
+
 	WARN_ON(dst != dssdev->dst);
 
 	if (dst != dssdev->dst)
@@ -698,8 +701,7 @@
 
 	omapdss_output_unset_device(dssdev);
 
-	if (dssdev->manager)
-		dss_mgr_disconnect(dssdev->manager, dssdev);
+	dss_mgr_disconnect(channel, dssdev);
 }
 
 static const struct omapdss_dpi_ops dpi_ops = {
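
Besides the manager-to-channel conversion, dpi_check_timings gains an alignment
check: DPI output now rejects modes whose horizontal resolution is not a
multiple of 8 pixels. The validation logic in isolation (hw_timings_ok is a
hypothetical stand-in for dispc_mgr_timings_ok):

#include <errno.h>

struct video_timings {
	unsigned int x_res;
	unsigned int pixelclock;
};

/* hypothetical stand-in for the hardware timing check */
static int hw_timings_ok(const struct video_timings *t)
{
	return t->pixelclock != 0;
}

int dpi_style_check_timings(const struct video_timings *t)
{
	/* new in this patch: x_res must be aligned to 8 pixels */
	if (t->x_res % 8 != 0)
		return -EINVAL;

	if (!hw_timings_ok(t))
		return -EINVAL;

	return 0;
}
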
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 43be4b2..8730646 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -214,9 +214,9 @@
 typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
 
 static int dsi_display_init_dispc(struct platform_device *dsidev,
-	struct omap_overlay_manager *mgr);
+	enum omap_channel channel);
 static void dsi_display_uninit_dispc(struct platform_device *dsidev,
-	struct omap_overlay_manager *mgr);
+	enum omap_channel channel);
 
 static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
 
@@ -3826,19 +3826,19 @@
 {
 	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-	struct omap_overlay_manager *mgr = dsi->output.manager;
+	enum omap_channel dispc_channel = dssdev->dispc_channel;
 	int bpp = dsi_get_pixel_size(dsi->pix_fmt);
 	struct omap_dss_device *out = &dsi->output;
 	u8 data_type;
 	u16 word_count;
 	int r;
 
-	if (out->manager == NULL) {
+	if (!out->dispc_channel_connected) {
 		DSSERR("failed to enable display: no output/manager\n");
 		return -ENODEV;
 	}
 
-	r = dsi_display_init_dispc(dsidev, mgr);
+	r = dsi_display_init_dispc(dsidev, dispc_channel);
 	if (r)
 		goto err_init_dispc;
 
@@ -3876,7 +3876,7 @@
 		dsi_if_enable(dsidev, true);
 	}
 
-	r = dss_mgr_enable(mgr);
+	r = dss_mgr_enable(dispc_channel);
 	if (r)
 		goto err_mgr_enable;
 
@@ -3888,7 +3888,7 @@
 		dsi_vc_enable(dsidev, channel, false);
 	}
 err_pix_fmt:
-	dsi_display_uninit_dispc(dsidev, mgr);
+	dsi_display_uninit_dispc(dsidev, dispc_channel);
 err_init_dispc:
 	return r;
 }
@@ -3897,7 +3897,7 @@
 {
 	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-	struct omap_overlay_manager *mgr = dsi->output.manager;
+	enum omap_channel dispc_channel = dssdev->dispc_channel;
 
 	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
 		dsi_if_enable(dsidev, false);
@@ -3910,15 +3910,15 @@
 		dsi_if_enable(dsidev, true);
 	}
 
-	dss_mgr_disable(mgr);
+	dss_mgr_disable(dispc_channel);
 
-	dsi_display_uninit_dispc(dsidev, mgr);
+	dsi_display_uninit_dispc(dsidev, dispc_channel);
 }
 
 static void dsi_update_screen_dispc(struct platform_device *dsidev)
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-	struct omap_overlay_manager *mgr = dsi->output.manager;
+	enum omap_channel dispc_channel = dsi->output.dispc_channel;
 	unsigned bytespp;
 	unsigned bytespl;
 	unsigned bytespf;
@@ -3980,9 +3980,9 @@
 		msecs_to_jiffies(250));
 	BUG_ON(r == 0);
 
-	dss_mgr_set_timings(mgr, &dsi->timings);
+	dss_mgr_set_timings(dispc_channel, &dsi->timings);
 
-	dss_mgr_start_update(mgr);
+	dss_mgr_start_update(dispc_channel);
 
 	if (dsi->te_enabled) {
 		/* disable LP_RX_TO, so that we can receive TE.  Time to wait
@@ -4105,17 +4105,17 @@
 }
 
 static int dsi_display_init_dispc(struct platform_device *dsidev,
-		struct omap_overlay_manager *mgr)
+		enum omap_channel channel)
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 	int r;
 
-	dss_select_lcd_clk_source(mgr->id, dsi->module_id == 0 ?
+	dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
 			OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
 			OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
 
 	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
-		r = dss_mgr_register_framedone_handler(mgr,
+		r = dss_mgr_register_framedone_handler(channel,
 				dsi_framedone_irq_callback, dsidev);
 		if (r) {
 			DSSERR("can't register FRAMEDONE handler\n");
@@ -4140,7 +4140,7 @@
 	dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
 	dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
 
-	dss_mgr_set_timings(mgr, &dsi->timings);
+	dss_mgr_set_timings(channel, &dsi->timings);
 
 	r = dsi_configure_dispc_clocks(dsidev);
 	if (r)
@@ -4151,28 +4151,28 @@
 			dsi_get_pixel_size(dsi->pix_fmt);
 	dsi->mgr_config.lcden_sig_polarity = 0;
 
-	dss_mgr_set_lcd_config(mgr, &dsi->mgr_config);
+	dss_mgr_set_lcd_config(channel, &dsi->mgr_config);
 
 	return 0;
 err1:
 	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
-		dss_mgr_unregister_framedone_handler(mgr,
+		dss_mgr_unregister_framedone_handler(channel,
 				dsi_framedone_irq_callback, dsidev);
 err:
-	dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
+	dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
 	return r;
 }
 
 static void dsi_display_uninit_dispc(struct platform_device *dsidev,
-		struct omap_overlay_manager *mgr)
+		enum omap_channel channel)
 {
 	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
 	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
-		dss_mgr_unregister_framedone_handler(mgr,
+		dss_mgr_unregister_framedone_handler(channel,
 				dsi_framedone_irq_callback, dsidev);
 
-	dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
+	dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
 }
 
 static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
@@ -4983,18 +4983,14 @@
 		struct omap_dss_device *dst)
 {
 	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-	struct omap_overlay_manager *mgr;
+	enum omap_channel dispc_channel = dssdev->dispc_channel;
 	int r;
 
 	r = dsi_regulator_init(dsidev);
 	if (r)
 		return r;
 
-	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
-	if (!mgr)
-		return -ENODEV;
-
-	r = dss_mgr_connect(mgr, dssdev);
+	r = dss_mgr_connect(dispc_channel, dssdev);
 	if (r)
 		return r;
 
@@ -5002,7 +4998,7 @@
 	if (r) {
 		DSSERR("failed to connect output to new device: %s\n",
 				dssdev->name);
-		dss_mgr_disconnect(mgr, dssdev);
+		dss_mgr_disconnect(dispc_channel, dssdev);
 		return r;
 	}
 
@@ -5012,6 +5008,8 @@
 static void dsi_disconnect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
+	enum omap_channel dispc_channel = dssdev->dispc_channel;
+
 	WARN_ON(dst != dssdev->dst);
 
 	if (dst != dssdev->dst)
@@ -5019,8 +5017,7 @@
 
 	omapdss_output_unset_device(dssdev);
 
-	if (dssdev->manager)
-		dss_mgr_disconnect(dssdev->manager, dssdev);
+	dss_mgr_disconnect(dispc_channel, dssdev);
 }
 
 static const struct omapdss_dsi_ops dsi_ops = {
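
Through all the churn, dsi_display_init_dispc keeps the kernel's standard
goto-unwind shape: each error label undoes exactly the steps that succeeded
before the failure. The skeleton, with the real steps replaced by hypothetical
stubs:

/* stand-ins for the real init steps (FRAMEDONE handler, clock config) */
static int register_framedone_handler(void) { return 0; }
static void unregister_framedone_handler(void) { }
static int configure_clocks(void) { return 0; }
static void restore_clock_source(void) { }

int init_dispc_style(void)
{
	int r;

	r = register_framedone_handler();
	if (r)
		goto err;

	r = configure_clocks();
	if (r)
		goto err1;

	return 0;

err1:
	unregister_framedone_handler();	/* unwind only what succeeded */
err:
	restore_clock_source();
	return r;
}
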
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 9a64532..38e6ab5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -25,6 +25,8 @@
 
 #include <linux/interrupt.h>
 
+#include "omapdss.h"
+
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
@@ -205,29 +207,6 @@
 int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
 int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
 
-/* display */
-int dss_suspend_all_devices(void);
-int dss_resume_all_devices(void);
-void dss_disable_all_devices(void);
-
-int display_init_sysfs(struct platform_device *pdev);
-void display_uninit_sysfs(struct platform_device *pdev);
-
-/* manager */
-int dss_init_overlay_managers(void);
-void dss_uninit_overlay_managers(void);
-int dss_init_overlay_managers_sysfs(struct platform_device *pdev);
-void dss_uninit_overlay_managers_sysfs(struct platform_device *pdev);
-int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
-		const struct omap_overlay_manager_info *info);
-int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
-		const struct omap_video_timings *timings);
-int dss_mgr_check(struct omap_overlay_manager *mgr,
-		struct omap_overlay_manager_info *info,
-		const struct omap_video_timings *mgr_timings,
-		const struct dss_lcd_mgr_config *config,
-		struct omap_overlay_info **overlay_infos);
-
 static inline bool dss_mgr_is_lcd(enum omap_channel id)
 {
 	if (id == OMAP_DSS_CHANNEL_LCD || id == OMAP_DSS_CHANNEL_LCD2 ||
@@ -237,24 +216,6 @@
 		return false;
 }
 
-int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
-		struct platform_device *pdev);
-void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr);
-
-/* overlay */
-void dss_init_overlays(struct platform_device *pdev);
-void dss_uninit_overlays(struct platform_device *pdev);
-void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
-int dss_ovl_simple_check(struct omap_overlay *ovl,
-		const struct omap_overlay_info *info);
-int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
-		const struct omap_video_timings *mgr_timings);
-bool dss_ovl_use_replication(struct dss_lcd_mgr_config config,
-		enum omap_color_mode mode);
-int dss_overlay_kobj_init(struct omap_overlay *ovl,
-		struct platform_device *pdev);
-void dss_overlay_kobj_uninit(struct omap_overlay *ovl);
-
 /* DSS */
 int dss_init_platform_driver(void) __init;
 void dss_uninit_platform_driver(void);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 7103c65..f892ae15 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -165,9 +165,10 @@
 {
 	int r;
 	struct omap_video_timings *p;
-	struct omap_overlay_manager *mgr = hdmi.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 	struct hdmi_wp_data *wp = &hdmi.wp;
 	struct dss_pll_clock_info hdmi_cinfo = { 0 };
+	unsigned pc;
 
 	r = hdmi_power_on_core(dssdev);
 	if (r)
@@ -181,7 +182,11 @@
 
 	DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
 
-	hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
+	pc = p->pixelclock;
+	if (p->double_pixel)
+		pc *= 2;
+
+	hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
 
 	r = dss_pll_enable(&hdmi.pll.pll);
 	if (r) {
@@ -212,24 +217,24 @@
 	dispc_enable_gamma_table(0);
 
 	/* tv size */
-	dss_mgr_set_timings(mgr, p);
+	dss_mgr_set_timings(channel, p);
+
+	r = dss_mgr_enable(channel);
+	if (r)
+		goto err_mgr_enable;
 
 	r = hdmi_wp_video_start(&hdmi.wp);
 	if (r)
 		goto err_vid_enable;
 
-	r = dss_mgr_enable(mgr);
-	if (r)
-		goto err_mgr_enable;
-
 	hdmi_wp_set_irqenable(wp,
 		HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
 
 	return 0;
 
-err_mgr_enable:
-	hdmi_wp_video_stop(&hdmi.wp);
 err_vid_enable:
+	dss_mgr_disable(channel);
+err_mgr_enable:
 	hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
 err_phy_pwr:
 err_phy_cfg:
@@ -242,14 +247,14 @@
 
 static void hdmi_power_off_full(struct omap_dss_device *dssdev)
 {
-	struct omap_overlay_manager *mgr = hdmi.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 
 	hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
 
-	dss_mgr_disable(mgr);
-
 	hdmi_wp_video_stop(&hdmi.wp);
 
+	dss_mgr_disable(channel);
+
 	hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
 
 	dss_pll_disable(&hdmi.pll.pll);
@@ -260,9 +265,7 @@
 static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
 					struct omap_video_timings *timings)
 {
-	struct omap_dss_device *out = &hdmi.output;
-
-	if (!dispc_mgr_timings_ok(out->dispc_channel, timings))
+	if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
 		return -EINVAL;
 
 	return 0;
@@ -343,7 +346,7 @@
 
 	mutex_lock(&hdmi.lock);
 
-	if (out->manager == NULL) {
+	if (!out->dispc_channel_connected) {
 		DSSERR("failed to enable display: no output/manager\n");
 		r = -ENODEV;
 		goto err0;
@@ -433,18 +436,14 @@
 static int hdmi_connect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
-	struct omap_overlay_manager *mgr;
+	enum omap_channel channel = dssdev->dispc_channel;
 	int r;
 
 	r = hdmi_init_regulator();
 	if (r)
 		return r;
 
-	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
-	if (!mgr)
-		return -ENODEV;
-
-	r = dss_mgr_connect(mgr, dssdev);
+	r = dss_mgr_connect(channel, dssdev);
 	if (r)
 		return r;
 
@@ -452,7 +451,7 @@
 	if (r) {
 		DSSERR("failed to connect output to new device: %s\n",
 				dst->name);
-		dss_mgr_disconnect(mgr, dssdev);
+		dss_mgr_disconnect(channel, dssdev);
 		return r;
 	}
 
@@ -462,6 +461,8 @@
 static void hdmi_disconnect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
+	enum omap_channel channel = dssdev->dispc_channel;
+
 	WARN_ON(dst != dssdev->dst);
 
 	if (dst != dssdev->dst)
@@ -469,8 +470,7 @@
 
 	omapdss_output_unset_device(dssdev);
 
-	if (dssdev->manager)
-		dss_mgr_disconnect(dssdev->manager, dssdev);
+	dss_mgr_disconnect(channel, dssdev);
 }
 
 static int hdmi_read_edid(struct omap_dss_device *dssdev,
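
Two behavioural changes ride along in hdmi4.c: the manager is now enabled
before the HDMI wrapper starts video (and disabled after it stops), and
pixel-repetition modes double the clock handed to the PLL. The clock rule as a
small sketch:

struct video_timings {
	unsigned long pixelclock;	/* Hz */
	_Bool double_pixel;
};

/*
 * Pixel-repetition modes (e.g. 480i/576i over HDMI) transmit every pixel
 * twice, so the link must run at twice the nominal pixel clock.
 */
unsigned long hdmi_target_clock(const struct video_timings *t)
{
	unsigned long pc = t->pixelclock;

	if (t->double_pixel)
		pc *= 2;

	return pc;	/* this is what hdmi_pll_compute() is fed */
}
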
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index a955a2c..a43f7b1 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -182,8 +182,9 @@
 {
 	int r;
 	struct omap_video_timings *p;
-	struct omap_overlay_manager *mgr = hdmi.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 	struct dss_pll_clock_info hdmi_cinfo = { 0 };
+	unsigned pc;
 
 	r = hdmi_power_on_core(dssdev);
 	if (r)
@@ -193,7 +194,11 @@
 
 	DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
 
-	hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
+	pc = p->pixelclock;
+	if (p->double_pixel)
+		pc *= 2;
+
+	hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
 
 	/* disable and clear irqs */
 	hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
@@ -229,24 +234,24 @@
 	dispc_enable_gamma_table(0);
 
 	/* tv size */
-	dss_mgr_set_timings(mgr, p);
+	dss_mgr_set_timings(channel, p);
+
+	r = dss_mgr_enable(channel);
+	if (r)
+		goto err_mgr_enable;
 
 	r = hdmi_wp_video_start(&hdmi.wp);
 	if (r)
 		goto err_vid_enable;
 
-	r = dss_mgr_enable(mgr);
-	if (r)
-		goto err_mgr_enable;
-
 	hdmi_wp_set_irqenable(&hdmi.wp,
 			HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
 
 	return 0;
 
-err_mgr_enable:
-	hdmi_wp_video_stop(&hdmi.wp);
 err_vid_enable:
+	dss_mgr_disable(channel);
+err_mgr_enable:
 	hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
 err_phy_pwr:
 err_phy_cfg:
@@ -259,14 +264,14 @@
 
 static void hdmi_power_off_full(struct omap_dss_device *dssdev)
 {
-	struct omap_overlay_manager *mgr = hdmi.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 
 	hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
 
-	dss_mgr_disable(mgr);
-
 	hdmi_wp_video_stop(&hdmi.wp);
 
+	dss_mgr_disable(channel);
+
 	hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
 
 	dss_pll_disable(&hdmi.pll.pll);
@@ -277,13 +282,7 @@
 static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
 					struct omap_video_timings *timings)
 {
-	struct omap_dss_device *out = &hdmi.output;
-
-	/* TODO: proper interlace support */
-	if (timings->interlace)
-		return -EINVAL;
-
-	if (!dispc_mgr_timings_ok(out->dispc_channel, timings))
+	if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
 		return -EINVAL;
 
 	return 0;
@@ -373,7 +372,7 @@
 
 	mutex_lock(&hdmi.lock);
 
-	if (out->manager == NULL) {
+	if (!out->dispc_channel_connected) {
 		DSSERR("failed to enable display: no output/manager\n");
 		r = -ENODEV;
 		goto err0;
@@ -463,18 +462,14 @@
 static int hdmi_connect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
-	struct omap_overlay_manager *mgr;
+	enum omap_channel channel = dssdev->dispc_channel;
 	int r;
 
 	r = hdmi_init_regulator();
 	if (r)
 		return r;
 
-	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
-	if (!mgr)
-		return -ENODEV;
-
-	r = dss_mgr_connect(mgr, dssdev);
+	r = dss_mgr_connect(channel, dssdev);
 	if (r)
 		return r;
 
@@ -482,7 +477,7 @@
 	if (r) {
 		DSSERR("failed to connect output to new device: %s\n",
 				dst->name);
-		dss_mgr_disconnect(mgr, dssdev);
+		dss_mgr_disconnect(channel, dssdev);
 		return r;
 	}
 
@@ -492,6 +487,8 @@
 static void hdmi_disconnect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
+	enum omap_channel channel = dssdev->dispc_channel;
+
 	WARN_ON(dst != dssdev->dst);
 
 	if (dst != dssdev->dst)
@@ -499,8 +496,7 @@
 
 	omapdss_output_unset_device(dssdev);
 
-	if (dssdev->manager)
-		dss_mgr_disconnect(dssdev->manager, dssdev);
+	dss_mgr_disconnect(channel, dssdev);
 }
 
 static int hdmi_read_edid(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 8ea531d..6a39752 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -292,25 +292,36 @@
 {
 	DSSDBG("hdmi_core_init\n");
 
+	video_cfg->v_fc_config.timings = cfg->timings;
+
 	/* video core */
 	video_cfg->data_enable_pol = 1; /* It is always 1 */
-	video_cfg->v_fc_config.timings.hsync_level = cfg->timings.hsync_level;
-	video_cfg->v_fc_config.timings.x_res = cfg->timings.x_res;
-	video_cfg->v_fc_config.timings.hsw = cfg->timings.hsw - 1;
-	video_cfg->v_fc_config.timings.hbp = cfg->timings.hbp;
-	video_cfg->v_fc_config.timings.hfp = cfg->timings.hfp;
 	video_cfg->hblank = cfg->timings.hfp +
-				cfg->timings.hbp + cfg->timings.hsw - 1;
-	video_cfg->v_fc_config.timings.vsync_level = cfg->timings.vsync_level;
-	video_cfg->v_fc_config.timings.y_res = cfg->timings.y_res;
-	video_cfg->v_fc_config.timings.vsw = cfg->timings.vsw;
-	video_cfg->v_fc_config.timings.vfp = cfg->timings.vfp;
-	video_cfg->v_fc_config.timings.vbp = cfg->timings.vbp;
-	video_cfg->vblank_osc = 0; /* Always 0 - need to confirm */
+				cfg->timings.hbp + cfg->timings.hsw;
+	video_cfg->vblank_osc = 0;
 	video_cfg->vblank = cfg->timings.vsw +
 				cfg->timings.vfp + cfg->timings.vbp;
 	video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
-	video_cfg->v_fc_config.timings.interlace = cfg->timings.interlace;
+
+	if (cfg->timings.interlace) {
+		/* set vblank_osc if vblank is fractional */
+		if (video_cfg->vblank % 2 != 0)
+			video_cfg->vblank_osc = 1;
+
+		video_cfg->v_fc_config.timings.y_res /= 2;
+		video_cfg->vblank /= 2;
+		video_cfg->v_fc_config.timings.vfp /= 2;
+		video_cfg->v_fc_config.timings.vsw /= 2;
+		video_cfg->v_fc_config.timings.vbp /= 2;
+	}
+
+	if (cfg->timings.double_pixel) {
+		video_cfg->v_fc_config.timings.x_res *= 2;
+		video_cfg->hblank *= 2;
+		video_cfg->v_fc_config.timings.hfp *= 2;
+		video_cfg->v_fc_config.timings.hsw *= 2;
+		video_cfg->v_fc_config.timings.hbp *= 2;
+	}
 }
 
 /* DSS_HDMI_CORE_VIDEO_CONFIG */
@@ -377,6 +388,11 @@
 	/* select DVI mode */
 	REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
 			cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
+
+	if (cfg->v_fc_config.timings.double_pixel)
+		REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 2, 7, 4);
+	else
+		REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 1, 7, 4);
 }
 
 static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core)
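
The rewritten hdmi_core_init now derives the frame-composer timings from the
incoming mode in two steps: interlace halves the vertical values (flagging
vblank_osc when the blanking does not split evenly), and pixel repetition
doubles the horizontal ones. The arithmetic in isolation, as a compilable
sketch:

struct fc_timings {
	unsigned int x_res, y_res;
	unsigned int hfp, hsw, hbp;
	unsigned int vfp, vsw, vbp;
	unsigned int vblank;
	int vblank_osc;
	_Bool interlace, double_pixel;
};

void adjust_for_scan_mode(struct fc_timings *t)
{
	t->vblank = t->vsw + t->vfp + t->vbp;
	t->vblank_osc = 0;

	if (t->interlace) {
		/* an odd blanking total cannot split evenly across fields */
		if (t->vblank % 2 != 0)
			t->vblank_osc = 1;

		/* per-field vertical timings are half the frame values */
		t->y_res /= 2;
		t->vblank /= 2;
		t->vfp /= 2;
		t->vsw /= 2;
		t->vbp /= 2;
	}

	if (t->double_pixel) {
		/* pixel repetition doubles everything on the horizontal axis */
		t->x_res *= 2;
		t->hfp *= 2;
		t->hsw *= 2;
		t->hbp *= 2;
	}
}
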
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 7c544bc..13442b9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -165,12 +165,24 @@
 {
 	u32 timing_h = 0;
 	u32 timing_v = 0;
+	unsigned hsw_offset = 1;
 
 	DSSDBG("Enter hdmi_wp_video_config_timing\n");
 
+	/*
+	 * On OMAP4 and OMAP5 ES1 the HSW field is programmed as-is. On OMAP5
+	 * ES2+ (including DRA7/AM5 SoCs) the HSW field is programmed to
+	 * hsw - 1. However, we don't support OMAP5 ES1 at all, so we can
+	 * simply check for OMAP4 here.
+	 */
+	if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
+	    omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
+	    omapdss_get_version() == OMAPDSS_VER_OMAP4)
+		hsw_offset = 0;
+
 	timing_h |= FLD_VAL(timings->hbp, 31, 20);
 	timing_h |= FLD_VAL(timings->hfp, 19, 8);
-	timing_h |= FLD_VAL(timings->hsw, 7, 0);
+	timing_h |= FLD_VAL(timings->hsw - hsw_offset, 7, 0);
 	hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
 
 	timing_v |= FLD_VAL(timings->vbp, 31, 20);
@@ -187,8 +199,6 @@
 	video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
 	video_fmt->y_res = param->timings.y_res;
 	video_fmt->x_res = param->timings.x_res;
-	if (param->timings.interlace)
-		video_fmt->y_res /= 2;
 
 	timings->hbp = param->timings.hbp;
 	timings->hfp = param->timings.hfp;
@@ -196,9 +206,25 @@
 	timings->vbp = param->timings.vbp;
 	timings->vfp = param->timings.vfp;
 	timings->vsw = param->timings.vsw;
+
 	timings->vsync_level = param->timings.vsync_level;
 	timings->hsync_level = param->timings.hsync_level;
 	timings->interlace = param->timings.interlace;
+	timings->double_pixel = param->timings.double_pixel;
+
+	if (param->timings.interlace) {
+		video_fmt->y_res /= 2;
+		timings->vbp /= 2;
+		timings->vfp /= 2;
+		timings->vsw /= 2;
+	}
+
+	if (param->timings.double_pixel) {
+		video_fmt->x_res *= 2;
+		timings->hfp *= 2;
+		timings->hsw *= 2;
+		timings->hbp *= 2;
+	}
 }
 
 void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
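
hdmi_wp_video_config_timing packs the three horizontal values into one register
word, and the new hsw_offset makes the HSW field SoC-dependent: OMAP4 programs
it as-is, later SoCs program hsw - 1. A compilable sketch of the packing (this
FLD_VAL is a plain reimplementation of the bitfield idiom, not the driver's
macro definition):

#include <stdint.h>

/* place val into bits [start:end] of a register word, start >= end */
#define FLD_VAL(val, start, end) \
	(((uint32_t)(val) << (end)) & \
	 (((1u << ((start) - (end) + 1)) - 1u) << (end)))

uint32_t pack_timing_h(unsigned int hbp, unsigned int hfp, unsigned int hsw,
		       unsigned int hsw_offset)
{
	uint32_t timing_h = 0;

	timing_h |= FLD_VAL(hbp, 31, 20);
	timing_h |= FLD_VAL(hfp, 19, 8);
	/* hsw_offset is 0 on OMAP4, 1 on OMAP5 ES2+ / DRA7 */
	timing_h |= FLD_VAL(hsw - hsw_offset, 7, 0);

	return timing_h;
}
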
diff --git a/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c b/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c
deleted file mode 100644
index a7414fb..0000000
--- a/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "MANAGER"
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/jiffies.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-
-static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name);
-}
-
-static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
-{
-	struct omap_dss_device *dssdev = mgr->get_device(mgr);
-
-	return snprintf(buf, PAGE_SIZE, "%s\n", dssdev ?
-			dssdev->name : "<none>");
-}
-
-static int manager_display_match(struct omap_dss_device *dssdev, void *data)
-{
-	const char *str = data;
-
-	return sysfs_streq(dssdev->name, str);
-}
-
-static ssize_t manager_display_store(struct omap_overlay_manager *mgr,
-		const char *buf, size_t size)
-{
-	int r = 0;
-	size_t len = size;
-	struct omap_dss_device *dssdev = NULL;
-	struct omap_dss_device *old_dssdev;
-
-	if (buf[size-1] == '\n')
-		--len;
-
-	if (len > 0)
-		dssdev = omap_dss_find_device((void *)buf,
-			manager_display_match);
-
-	if (len > 0 && dssdev == NULL)
-		return -EINVAL;
-
-	if (dssdev) {
-		DSSDBG("display %s found\n", dssdev->name);
-
-		if (omapdss_device_is_connected(dssdev)) {
-			DSSERR("new display is already connected\n");
-			r = -EINVAL;
-			goto put_device;
-		}
-
-		if (omapdss_device_is_enabled(dssdev)) {
-			DSSERR("new display is not disabled\n");
-			r = -EINVAL;
-			goto put_device;
-		}
-	}
-
-	old_dssdev = mgr->get_device(mgr);
-	if (old_dssdev) {
-		if (omapdss_device_is_enabled(old_dssdev)) {
-			DSSERR("old display is not disabled\n");
-			r = -EINVAL;
-			goto put_device;
-		}
-
-		old_dssdev->driver->disconnect(old_dssdev);
-	}
-
-	if (dssdev) {
-		r = dssdev->driver->connect(dssdev);
-		if (r) {
-			DSSERR("failed to connect new device\n");
-			goto put_device;
-		}
-
-		old_dssdev = mgr->get_device(mgr);
-		if (old_dssdev != dssdev) {
-			DSSERR("failed to connect device to this manager\n");
-			dssdev->driver->disconnect(dssdev);
-			goto put_device;
-		}
-
-		r = mgr->apply(mgr);
-		if (r) {
-			DSSERR("failed to apply dispc config\n");
-			goto put_device;
-		}
-	}
-
-put_device:
-	if (dssdev)
-		omap_dss_put_device(dssdev);
-
-	return r ? r : size;
-}
-
-static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
-					  char *buf)
-{
-	struct omap_overlay_manager_info info;
-
-	mgr->get_manager_info(mgr, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color);
-}
-
-static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
-					   const char *buf, size_t size)
-{
-	struct omap_overlay_manager_info info;
-	u32 color;
-	int r;
-
-	r = kstrtouint(buf, 0, &color);
-	if (r)
-		return r;
-
-	mgr->get_manager_info(mgr, &info);
-
-	info.default_color = color;
-
-	r = mgr->set_manager_info(mgr, &info);
-	if (r)
-		return r;
-
-	r = mgr->apply(mgr);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static const char *trans_key_type_str[] = {
-	"gfx-destination",
-	"video-source",
-};
-
-static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
-					   char *buf)
-{
-	enum omap_dss_trans_key_type key_type;
-	struct omap_overlay_manager_info info;
-
-	mgr->get_manager_info(mgr, &info);
-
-	key_type = info.trans_key_type;
-	BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
-
-	return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
-}
-
-static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
-					    const char *buf, size_t size)
-{
-	enum omap_dss_trans_key_type key_type;
-	struct omap_overlay_manager_info info;
-	int r;
-
-	for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
-			key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
-		if (sysfs_streq(buf, trans_key_type_str[key_type]))
-			break;
-	}
-
-	if (key_type == ARRAY_SIZE(trans_key_type_str))
-		return -EINVAL;
-
-	mgr->get_manager_info(mgr, &info);
-
-	info.trans_key_type = key_type;
-
-	r = mgr->set_manager_info(mgr, &info);
-	if (r)
-		return r;
-
-	r = mgr->apply(mgr);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
-					    char *buf)
-{
-	struct omap_overlay_manager_info info;
-
-	mgr->get_manager_info(mgr, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key);
-}
-
-static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
-					     const char *buf, size_t size)
-{
-	struct omap_overlay_manager_info info;
-	u32 key_value;
-	int r;
-
-	r = kstrtouint(buf, 0, &key_value);
-	if (r)
-		return r;
-
-	mgr->get_manager_info(mgr, &info);
-
-	info.trans_key = key_value;
-
-	r = mgr->set_manager_info(mgr, &info);
-	if (r)
-		return r;
-
-	r = mgr->apply(mgr);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
-					      char *buf)
-{
-	struct omap_overlay_manager_info info;
-
-	mgr->get_manager_info(mgr, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled);
-}
-
-static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
-					       const char *buf, size_t size)
-{
-	struct omap_overlay_manager_info info;
-	bool enable;
-	int r;
-
-	r = strtobool(buf, &enable);
-	if (r)
-		return r;
-
-	mgr->get_manager_info(mgr, &info);
-
-	info.trans_enabled = enable;
-
-	r = mgr->set_manager_info(mgr, &info);
-	if (r)
-		return r;
-
-	r = mgr->apply(mgr);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t manager_alpha_blending_enabled_show(
-		struct omap_overlay_manager *mgr, char *buf)
-{
-	struct omap_overlay_manager_info info;
-
-	if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
-		return -ENODEV;
-
-	mgr->get_manager_info(mgr, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-		info.partial_alpha_enabled);
-}
-
-static ssize_t manager_alpha_blending_enabled_store(
-		struct omap_overlay_manager *mgr,
-		const char *buf, size_t size)
-{
-	struct omap_overlay_manager_info info;
-	bool enable;
-	int r;
-
-	if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
-		return -ENODEV;
-
-	r = strtobool(buf, &enable);
-	if (r)
-		return r;
-
-	mgr->get_manager_info(mgr, &info);
-
-	info.partial_alpha_enabled = enable;
-
-	r = mgr->set_manager_info(mgr, &info);
-	if (r)
-		return r;
-
-	r = mgr->apply(mgr);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
-		char *buf)
-{
-	struct omap_overlay_manager_info info;
-
-	mgr->get_manager_info(mgr, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable);
-}
-
-static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
-		const char *buf, size_t size)
-{
-	struct omap_overlay_manager_info info;
-	int r;
-	bool enable;
-
-	if (!dss_has_feature(FEAT_CPR))
-		return -ENODEV;
-
-	r = strtobool(buf, &enable);
-	if (r)
-		return r;
-
-	mgr->get_manager_info(mgr, &info);
-
-	if (info.cpr_enable == enable)
-		return size;
-
-	info.cpr_enable = enable;
-
-	r = mgr->set_manager_info(mgr, &info);
-	if (r)
-		return r;
-
-	r = mgr->apply(mgr);
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
-		char *buf)
-{
-	struct omap_overlay_manager_info info;
-
-	mgr->get_manager_info(mgr, &info);
-
-	return snprintf(buf, PAGE_SIZE,
-			"%d %d %d %d %d %d %d %d %d\n",
-			info.cpr_coefs.rr,
-			info.cpr_coefs.rg,
-			info.cpr_coefs.rb,
-			info.cpr_coefs.gr,
-			info.cpr_coefs.gg,
-			info.cpr_coefs.gb,
-			info.cpr_coefs.br,
-			info.cpr_coefs.bg,
-			info.cpr_coefs.bb);
-}
-
-static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
-		const char *buf, size_t size)
-{
-	struct omap_overlay_manager_info info;
-	struct omap_dss_cpr_coefs coefs;
-	int r, i;
-	s16 *arr;
-
-	if (!dss_has_feature(FEAT_CPR))
-		return -ENODEV;
-
-	if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
-				&coefs.rr, &coefs.rg, &coefs.rb,
-				&coefs.gr, &coefs.gg, &coefs.gb,
-				&coefs.br, &coefs.bg, &coefs.bb) != 9)
-		return -EINVAL;
-
-	arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
-		coefs.gr, coefs.gg, coefs.gb,
-		coefs.br, coefs.bg, coefs.bb };
-
-	for (i = 0; i < 9; ++i) {
-		if (arr[i] < -512 || arr[i] > 511)
-			return -EINVAL;
-	}
-
-	mgr->get_manager_info(mgr, &info);
-
-	info.cpr_coefs = coefs;
-
-	r = mgr->set_manager_info(mgr, &info);
-	if (r)
-		return r;
-
-	r = mgr->apply(mgr);
-	if (r)
-		return r;
-
-	return size;
-}
-
-struct manager_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct omap_overlay_manager *, char *);
-	ssize_t	(*store)(struct omap_overlay_manager *, const char *, size_t);
-};
-
-#define MANAGER_ATTR(_name, _mode, _show, _store) \
-	struct manager_attribute manager_attr_##_name = \
-	__ATTR(_name, _mode, _show, _store)
-
-static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL);
-static MANAGER_ATTR(display, S_IRUGO|S_IWUSR,
-		manager_display_show, manager_display_store);
-static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR,
-		manager_default_color_show, manager_default_color_store);
-static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR,
-		manager_trans_key_type_show, manager_trans_key_type_store);
-static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR,
-		manager_trans_key_value_show, manager_trans_key_value_store);
-static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
-		manager_trans_key_enabled_show,
-		manager_trans_key_enabled_store);
-static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
-		manager_alpha_blending_enabled_show,
-		manager_alpha_blending_enabled_store);
-static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
-		manager_cpr_enable_show,
-		manager_cpr_enable_store);
-static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
-		manager_cpr_coef_show,
-		manager_cpr_coef_store);
-
-
-static struct attribute *manager_sysfs_attrs[] = {
-	&manager_attr_name.attr,
-	&manager_attr_display.attr,
-	&manager_attr_default_color.attr,
-	&manager_attr_trans_key_type.attr,
-	&manager_attr_trans_key_value.attr,
-	&manager_attr_trans_key_enabled.attr,
-	&manager_attr_alpha_blending_enabled.attr,
-	&manager_attr_cpr_enable.attr,
-	&manager_attr_cpr_coef.attr,
-	NULL
-};
-
-static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
-		char *buf)
-{
-	struct omap_overlay_manager *manager;
-	struct manager_attribute *manager_attr;
-
-	manager = container_of(kobj, struct omap_overlay_manager, kobj);
-	manager_attr = container_of(attr, struct manager_attribute, attr);
-
-	if (!manager_attr->show)
-		return -ENOENT;
-
-	return manager_attr->show(manager, buf);
-}
-
-static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr,
-		const char *buf, size_t size)
-{
-	struct omap_overlay_manager *manager;
-	struct manager_attribute *manager_attr;
-
-	manager = container_of(kobj, struct omap_overlay_manager, kobj);
-	manager_attr = container_of(attr, struct manager_attribute, attr);
-
-	if (!manager_attr->store)
-		return -ENOENT;
-
-	return manager_attr->store(manager, buf, size);
-}
-
-static const struct sysfs_ops manager_sysfs_ops = {
-	.show = manager_attr_show,
-	.store = manager_attr_store,
-};
-
-static struct kobj_type manager_ktype = {
-	.sysfs_ops = &manager_sysfs_ops,
-	.default_attrs = manager_sysfs_attrs,
-};
-
-int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
-		struct platform_device *pdev)
-{
-	return kobject_init_and_add(&mgr->kobj, &manager_ktype,
-			&pdev->dev.kobj, "manager%d", mgr->id);
-}
-
-void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr)
-{
-	kobject_del(&mgr->kobj);
-	kobject_put(&mgr->kobj);
-
-	memset(&mgr->kobj, 0, sizeof(mgr->kobj));
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/manager.c b/drivers/gpu/drm/omapdrm/dss/manager.c
deleted file mode 100644
index 08a67f4..0000000
--- a/drivers/gpu/drm/omapdrm/dss/manager.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/manager.c
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "MANAGER"
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/jiffies.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-
-static int num_managers;
-static struct omap_overlay_manager *managers;
-
-int dss_init_overlay_managers(void)
-{
-	int i;
-
-	num_managers = dss_feat_get_num_mgrs();
-
-	managers = kzalloc(sizeof(struct omap_overlay_manager) * num_managers,
-			GFP_KERNEL);
-
-	BUG_ON(managers == NULL);
-
-	for (i = 0; i < num_managers; ++i) {
-		struct omap_overlay_manager *mgr = &managers[i];
-
-		switch (i) {
-		case 0:
-			mgr->name = "lcd";
-			mgr->id = OMAP_DSS_CHANNEL_LCD;
-			break;
-		case 1:
-			mgr->name = "tv";
-			mgr->id = OMAP_DSS_CHANNEL_DIGIT;
-			break;
-		case 2:
-			mgr->name = "lcd2";
-			mgr->id = OMAP_DSS_CHANNEL_LCD2;
-			break;
-		case 3:
-			mgr->name = "lcd3";
-			mgr->id = OMAP_DSS_CHANNEL_LCD3;
-			break;
-		}
-
-		mgr->caps = 0;
-		mgr->supported_displays =
-			dss_feat_get_supported_displays(mgr->id);
-		mgr->supported_outputs =
-			dss_feat_get_supported_outputs(mgr->id);
-
-		INIT_LIST_HEAD(&mgr->overlays);
-	}
-
-	return 0;
-}
-
-int dss_init_overlay_managers_sysfs(struct platform_device *pdev)
-{
-	int i, r;
-
-	for (i = 0; i < num_managers; ++i) {
-		struct omap_overlay_manager *mgr = &managers[i];
-
-		r = dss_manager_kobj_init(mgr, pdev);
-		if (r)
-			DSSERR("failed to create sysfs file\n");
-	}
-
-	return 0;
-}
-
-void dss_uninit_overlay_managers(void)
-{
-	kfree(managers);
-	managers = NULL;
-	num_managers = 0;
-}
-
-void dss_uninit_overlay_managers_sysfs(struct platform_device *pdev)
-{
-	int i;
-
-	for (i = 0; i < num_managers; ++i) {
-		struct omap_overlay_manager *mgr = &managers[i];
-
-		dss_manager_kobj_uninit(mgr);
-	}
-}
-
-int omap_dss_get_num_overlay_managers(void)
-{
-	return num_managers;
-}
-EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
-
-struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
-{
-	if (num >= num_managers)
-		return NULL;
-
-	return &managers[num];
-}
-EXPORT_SYMBOL(omap_dss_get_overlay_manager);
-
-int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
-		const struct omap_overlay_manager_info *info)
-{
-	if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) {
-		/*
-		 * OMAP3 supports only graphics source transparency color key
-		 * and alpha blending simultaneously. See TRM 15.4.2.4.2.2
-		 * Alpha Mode.
-		 */
-		if (info->partial_alpha_enabled && info->trans_enabled
-			&& info->trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST) {
-			DSSERR("check_manager: illegal transparency key\n");
-			return -EINVAL;
-		}
-	}
-
-	return 0;
-}
-
-static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
-		struct omap_overlay_info **overlay_infos)
-{
-	struct omap_overlay *ovl1, *ovl2;
-	struct omap_overlay_info *info1, *info2;
-
-	list_for_each_entry(ovl1, &mgr->overlays, list) {
-		info1 = overlay_infos[ovl1->id];
-
-		if (info1 == NULL)
-			continue;
-
-		list_for_each_entry(ovl2, &mgr->overlays, list) {
-			if (ovl1 == ovl2)
-				continue;
-
-			info2 = overlay_infos[ovl2->id];
-
-			if (info2 == NULL)
-				continue;
-
-			if (info1->zorder == info2->zorder) {
-				DSSERR("overlays %d and %d have the same "
-						"zorder %d\n",
-					ovl1->id, ovl2->id, info1->zorder);
-				return -EINVAL;
-			}
-		}
-	}
-
-	return 0;
-}
-
-int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
-		const struct omap_video_timings *timings)
-{
-	if (!dispc_mgr_timings_ok(mgr->id, timings)) {
-		DSSERR("check_manager: invalid timings\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int dss_mgr_check_lcd_config(struct omap_overlay_manager *mgr,
-		const struct dss_lcd_mgr_config *config)
-{
-	struct dispc_clock_info cinfo = config->clock_info;
-	int dl = config->video_port_width;
-	bool stallmode = config->stallmode;
-	bool fifohandcheck = config->fifohandcheck;
-
-	if (cinfo.lck_div < 1 || cinfo.lck_div > 255)
-		return -EINVAL;
-
-	if (cinfo.pck_div < 1 || cinfo.pck_div > 255)
-		return -EINVAL;
-
-	if (dl != 12 && dl != 16 && dl != 18 && dl != 24)
-		return -EINVAL;
-
-	/* fifohandcheck should be used only with stallmode */
-	if (!stallmode && fifohandcheck)
-		return -EINVAL;
-
-	/*
-	 * io pad mode can be only checked by using dssdev connected to the
-	 * manager. Ignore checking these for now, add checks when manager
-	 * is capable of holding information related to the connected interface
-	 */
-
-	return 0;
-}
-
-int dss_mgr_check(struct omap_overlay_manager *mgr,
-		struct omap_overlay_manager_info *info,
-		const struct omap_video_timings *mgr_timings,
-		const struct dss_lcd_mgr_config *lcd_config,
-		struct omap_overlay_info **overlay_infos)
-{
-	struct omap_overlay *ovl;
-	int r;
-
-	if (dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) {
-		r = dss_mgr_check_zorder(mgr, overlay_infos);
-		if (r)
-			return r;
-	}
-
-	r = dss_mgr_check_timings(mgr, mgr_timings);
-	if (r)
-		return r;
-
-	r = dss_mgr_check_lcd_config(mgr, lcd_config);
-	if (r)
-		return r;
-
-	list_for_each_entry(ovl, &mgr->overlays, list) {
-		struct omap_overlay_info *oi;
-		int r;
-
-		oi = overlay_infos[ovl->id];
-
-		if (oi == NULL)
-			continue;
-
-		r = dss_ovl_check(ovl, oi, mgr_timings);
-		if (r)
-			return r;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
new file mode 100644
index 0000000..d7e7c90
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2016 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRM_DSS_H
+#define __OMAP_DRM_DSS_H
+
+#include <video/omapdss.h>
+
+u32 dispc_read_irqstatus(void);
+void dispc_clear_irqstatus(u32 mask);
+u32 dispc_read_irqenable(void);
+void dispc_write_irqenable(u32 mask);
+
+int dispc_request_irq(irq_handler_t handler, void *dev_id);
+void dispc_free_irq(void *dev_id);
+
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
+
+void dispc_mgr_enable(enum omap_channel channel, bool enable);
+bool dispc_mgr_is_enabled(enum omap_channel channel);
+u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
+u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
+bool dispc_mgr_go_busy(enum omap_channel channel);
+void dispc_mgr_go(enum omap_channel channel);
+void dispc_mgr_set_lcd_config(enum omap_channel channel,
+		const struct dss_lcd_mgr_config *config);
+void dispc_mgr_set_timings(enum omap_channel channel,
+		const struct omap_video_timings *timings);
+void dispc_mgr_setup(enum omap_channel channel,
+		const struct omap_overlay_manager_info *info);
+
+int dispc_ovl_enable(enum omap_plane plane, bool enable);
+bool dispc_ovl_enabled(enum omap_plane plane);
+void dispc_ovl_set_channel_out(enum omap_plane plane,
+		enum omap_channel channel);
+int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
+		bool replication, const struct omap_video_timings *mgr_timings,
+		bool mem_to_mem);
+
+enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel);
+
+struct dss_mgr_ops {
+	int (*connect)(enum omap_channel channel,
+		struct omap_dss_device *dst);
+	void (*disconnect)(enum omap_channel channel,
+		struct omap_dss_device *dst);
+
+	void (*start_update)(enum omap_channel channel);
+	int (*enable)(enum omap_channel channel);
+	void (*disable)(enum omap_channel channel);
+	void (*set_timings)(enum omap_channel channel,
+			const struct omap_video_timings *timings);
+	void (*set_lcd_config)(enum omap_channel channel,
+			const struct dss_lcd_mgr_config *config);
+	int (*register_framedone_handler)(enum omap_channel channel,
+			void (*handler)(void *), void *data);
+	void (*unregister_framedone_handler)(enum omap_channel channel,
+			void (*handler)(void *), void *data);
+};
+
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
+void dss_uninstall_mgr_ops(void);
+
+int dss_mgr_connect(enum omap_channel channel,
+		struct omap_dss_device *dst);
+void dss_mgr_disconnect(enum omap_channel channel,
+		struct omap_dss_device *dst);
+void dss_mgr_set_timings(enum omap_channel channel,
+		const struct omap_video_timings *timings);
+void dss_mgr_set_lcd_config(enum omap_channel channel,
+		const struct dss_lcd_mgr_config *config);
+int dss_mgr_enable(enum omap_channel channel);
+void dss_mgr_disable(enum omap_channel channel);
+void dss_mgr_start_update(enum omap_channel channel);
+int dss_mgr_register_framedone_handler(enum omap_channel channel,
+		void (*handler)(void *), void *data);
+void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
+		void (*handler)(void *), void *data);
+
+#endif /* __OMAP_DRM_DSS_H */
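
This new header is the pivot of the series: the overlay-manager objects
removed above are replaced by a dss_mgr_ops table that omapdrm installs
once, with every entry point keyed by enum omap_channel instead of a
struct omap_overlay_manager pointer. A condensed model of the
install-and-forward pattern (stand-in enum and ops, not the real types):

	#include <errno.h>

	enum channel { CH_LCD, CH_DIGIT, CH_LCD2 };

	struct mgr_ops {
		int (*enable)(enum channel ch);
		void (*disable)(enum channel ch);
	};

	static const struct mgr_ops *installed_ops;	/* one table at a time */

	static int install_mgr_ops(const struct mgr_ops *ops)
	{
		if (installed_ops)
			return -EBUSY;	/* as dss_install_mgr_ops() does */

		installed_ops = ops;
		return 0;
	}

	static int mgr_enable(enum channel ch)
	{
		/* thin shim: outputs call this, the CRTC layer implements it */
		return installed_ops->enable(ch);
	}
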
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 1607215..829232a 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -169,24 +169,6 @@
 }
 EXPORT_SYMBOL(omapdss_find_output_from_display);
 
-struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev)
-{
-	struct omap_dss_device *out;
-	struct omap_overlay_manager *mgr;
-
-	out = omapdss_find_output_from_display(dssdev);
-
-	if (out == NULL)
-		return NULL;
-
-	mgr = out->manager;
-
-	omap_dss_put_device(out);
-
-	return mgr;
-}
-EXPORT_SYMBOL(omapdss_find_mgr_from_display);
-
 static const struct dss_mgr_ops *dss_mgr_ops;
 
 int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops)
@@ -206,62 +188,62 @@
 }
 EXPORT_SYMBOL(dss_uninstall_mgr_ops);
 
-int dss_mgr_connect(struct omap_overlay_manager *mgr,
+int dss_mgr_connect(enum omap_channel channel,
 		struct omap_dss_device *dst)
 {
-	return dss_mgr_ops->connect(mgr, dst);
+	return dss_mgr_ops->connect(channel, dst);
 }
 EXPORT_SYMBOL(dss_mgr_connect);
 
-void dss_mgr_disconnect(struct omap_overlay_manager *mgr,
+void dss_mgr_disconnect(enum omap_channel channel,
 		struct omap_dss_device *dst)
 {
-	dss_mgr_ops->disconnect(mgr, dst);
+	dss_mgr_ops->disconnect(channel, dst);
 }
 EXPORT_SYMBOL(dss_mgr_disconnect);
 
-void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+void dss_mgr_set_timings(enum omap_channel channel,
 		const struct omap_video_timings *timings)
 {
-	dss_mgr_ops->set_timings(mgr, timings);
+	dss_mgr_ops->set_timings(channel, timings);
 }
 EXPORT_SYMBOL(dss_mgr_set_timings);
 
-void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+void dss_mgr_set_lcd_config(enum omap_channel channel,
 		const struct dss_lcd_mgr_config *config)
 {
-	dss_mgr_ops->set_lcd_config(mgr, config);
+	dss_mgr_ops->set_lcd_config(channel, config);
 }
 EXPORT_SYMBOL(dss_mgr_set_lcd_config);
 
-int dss_mgr_enable(struct omap_overlay_manager *mgr)
+int dss_mgr_enable(enum omap_channel channel)
 {
-	return dss_mgr_ops->enable(mgr);
+	return dss_mgr_ops->enable(channel);
 }
 EXPORT_SYMBOL(dss_mgr_enable);
 
-void dss_mgr_disable(struct omap_overlay_manager *mgr)
+void dss_mgr_disable(enum omap_channel channel)
 {
-	dss_mgr_ops->disable(mgr);
+	dss_mgr_ops->disable(channel);
 }
 EXPORT_SYMBOL(dss_mgr_disable);
 
-void dss_mgr_start_update(struct omap_overlay_manager *mgr)
+void dss_mgr_start_update(enum omap_channel channel)
 {
-	dss_mgr_ops->start_update(mgr);
+	dss_mgr_ops->start_update(channel);
 }
 EXPORT_SYMBOL(dss_mgr_start_update);
 
-int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr,
+int dss_mgr_register_framedone_handler(enum omap_channel channel,
 		void (*handler)(void *), void *data)
 {
-	return dss_mgr_ops->register_framedone_handler(mgr, handler, data);
+	return dss_mgr_ops->register_framedone_handler(channel, handler, data);
 }
 EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
 
-void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
+void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
 		void (*handler)(void *), void *data)
 {
-	dss_mgr_ops->unregister_framedone_handler(mgr, handler, data);
+	dss_mgr_ops->unregister_framedone_handler(channel, handler, data);
 }
 EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
diff --git a/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c b/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c
deleted file mode 100644
index 4cc5ddebf..0000000
--- a/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "OVERLAY"
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-#include <linux/platform_device.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-
-static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name);
-}
-
-static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			ovl->manager ? ovl->manager->name : "<none>");
-}
-
-static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
-		size_t size)
-{
-	int i, r;
-	struct omap_overlay_manager *mgr = NULL;
-	struct omap_overlay_manager *old_mgr;
-	int len = size;
-
-	if (buf[size-1] == '\n')
-		--len;
-
-	if (len > 0) {
-		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
-			mgr = omap_dss_get_overlay_manager(i);
-
-			if (sysfs_streq(buf, mgr->name))
-				break;
-
-			mgr = NULL;
-		}
-	}
-
-	if (len > 0 && mgr == NULL)
-		return -EINVAL;
-
-	if (mgr)
-		DSSDBG("manager %s found\n", mgr->name);
-
-	if (mgr == ovl->manager)
-		return size;
-
-	old_mgr = ovl->manager;
-
-	r = dispc_runtime_get();
-	if (r)
-		return r;
-
-	/* detach old manager */
-	if (old_mgr) {
-		r = ovl->unset_manager(ovl);
-		if (r) {
-			DSSERR("detach failed\n");
-			goto err;
-		}
-
-		r = old_mgr->apply(old_mgr);
-		if (r)
-			goto err;
-	}
-
-	if (mgr) {
-		r = ovl->set_manager(ovl, mgr);
-		if (r) {
-			DSSERR("Failed to attach overlay\n");
-			goto err;
-		}
-
-		r = mgr->apply(mgr);
-		if (r)
-			goto err;
-	}
-
-	dispc_runtime_put();
-
-	return size;
-
-err:
-	dispc_runtime_put();
-	return r;
-}
-
-static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
-{
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d,%d\n",
-			info.width, info.height);
-}
-
-static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
-{
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width);
-}
-
-static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
-{
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d,%d\n",
-			info.pos_x, info.pos_y);
-}
-
-static ssize_t overlay_position_store(struct omap_overlay *ovl,
-		const char *buf, size_t size)
-{
-	int r;
-	char *last;
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	info.pos_x = simple_strtoul(buf, &last, 10);
-	++last;
-	if (last - buf >= size)
-		return -EINVAL;
-
-	info.pos_y = simple_strtoul(last, &last, 10);
-
-	r = ovl->set_overlay_info(ovl, &info);
-	if (r)
-		return r;
-
-	if (ovl->manager) {
-		r = ovl->manager->apply(ovl->manager);
-		if (r)
-			return r;
-	}
-
-	return size;
-}
-
-static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
-{
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d,%d\n",
-			info.out_width, info.out_height);
-}
-
-static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
-		const char *buf, size_t size)
-{
-	int r;
-	char *last;
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	info.out_width = simple_strtoul(buf, &last, 10);
-	++last;
-	if (last - buf >= size)
-		return -EINVAL;
-
-	info.out_height = simple_strtoul(last, &last, 10);
-
-	r = ovl->set_overlay_info(ovl, &info);
-	if (r)
-		return r;
-
-	if (ovl->manager) {
-		r = ovl->manager->apply(ovl->manager);
-		if (r)
-			return r;
-	}
-
-	return size;
-}
-
-static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl));
-}
-
-static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
-		size_t size)
-{
-	int r;
-	bool enable;
-
-	r = strtobool(buf, &enable);
-	if (r)
-		return r;
-
-	if (enable)
-		r = ovl->enable(ovl);
-	else
-		r = ovl->disable(ovl);
-
-	if (r)
-		return r;
-
-	return size;
-}
-
-static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
-{
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			info.global_alpha);
-}
-
-static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
-		const char *buf, size_t size)
-{
-	int r;
-	u8 alpha;
-	struct omap_overlay_info info;
-
-	if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
-		return -ENODEV;
-
-	r = kstrtou8(buf, 0, &alpha);
-	if (r)
-		return r;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	info.global_alpha = alpha;
-
-	r = ovl->set_overlay_info(ovl, &info);
-	if (r)
-		return r;
-
-	if (ovl->manager) {
-		r = ovl->manager->apply(ovl->manager);
-		if (r)
-			return r;
-	}
-
-	return size;
-}
-
-static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
-		char *buf)
-{
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			info.pre_mult_alpha);
-}
-
-static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
-		const char *buf, size_t size)
-{
-	int r;
-	u8 alpha;
-	struct omap_overlay_info info;
-
-	if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
-		return -ENODEV;
-
-	r = kstrtou8(buf, 0, &alpha);
-	if (r)
-		return r;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	info.pre_mult_alpha = alpha;
-
-	r = ovl->set_overlay_info(ovl, &info);
-	if (r)
-		return r;
-
-	if (ovl->manager) {
-		r = ovl->manager->apply(ovl->manager);
-		if (r)
-			return r;
-	}
-
-	return size;
-}
-
-static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
-{
-	struct omap_overlay_info info;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder);
-}
-
-static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
-		const char *buf, size_t size)
-{
-	int r;
-	u8 zorder;
-	struct omap_overlay_info info;
-
-	if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
-		return -ENODEV;
-
-	r = kstrtou8(buf, 0, &zorder);
-	if (r)
-		return r;
-
-	ovl->get_overlay_info(ovl, &info);
-
-	info.zorder = zorder;
-
-	r = ovl->set_overlay_info(ovl, &info);
-	if (r)
-		return r;
-
-	if (ovl->manager) {
-		r = ovl->manager->apply(ovl->manager);
-		if (r)
-			return r;
-	}
-
-	return size;
-}
-
-struct overlay_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct omap_overlay *, char *);
-	ssize_t	(*store)(struct omap_overlay *, const char *, size_t);
-};
-
-#define OVERLAY_ATTR(_name, _mode, _show, _store) \
-	struct overlay_attribute overlay_attr_##_name = \
-	__ATTR(_name, _mode, _show, _store)
-
-static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL);
-static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR,
-		overlay_manager_show, overlay_manager_store);
-static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL);
-static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL);
-static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR,
-		overlay_position_show, overlay_position_store);
-static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR,
-		overlay_output_size_show, overlay_output_size_store);
-static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
-		overlay_enabled_show, overlay_enabled_store);
-static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
-		overlay_global_alpha_show, overlay_global_alpha_store);
-static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
-		overlay_pre_mult_alpha_show,
-		overlay_pre_mult_alpha_store);
-static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR,
-		overlay_zorder_show, overlay_zorder_store);
-
-static struct attribute *overlay_sysfs_attrs[] = {
-	&overlay_attr_name.attr,
-	&overlay_attr_manager.attr,
-	&overlay_attr_input_size.attr,
-	&overlay_attr_screen_width.attr,
-	&overlay_attr_position.attr,
-	&overlay_attr_output_size.attr,
-	&overlay_attr_enabled.attr,
-	&overlay_attr_global_alpha.attr,
-	&overlay_attr_pre_mult_alpha.attr,
-	&overlay_attr_zorder.attr,
-	NULL
-};
-
-static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
-		char *buf)
-{
-	struct omap_overlay *overlay;
-	struct overlay_attribute *overlay_attr;
-
-	overlay = container_of(kobj, struct omap_overlay, kobj);
-	overlay_attr = container_of(attr, struct overlay_attribute, attr);
-
-	if (!overlay_attr->show)
-		return -ENOENT;
-
-	return overlay_attr->show(overlay, buf);
-}
-
-static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr,
-		const char *buf, size_t size)
-{
-	struct omap_overlay *overlay;
-	struct overlay_attribute *overlay_attr;
-
-	overlay = container_of(kobj, struct omap_overlay, kobj);
-	overlay_attr = container_of(attr, struct overlay_attribute, attr);
-
-	if (!overlay_attr->store)
-		return -ENOENT;
-
-	return overlay_attr->store(overlay, buf, size);
-}
-
-static const struct sysfs_ops overlay_sysfs_ops = {
-	.show = overlay_attr_show,
-	.store = overlay_attr_store,
-};
-
-static struct kobj_type overlay_ktype = {
-	.sysfs_ops = &overlay_sysfs_ops,
-	.default_attrs = overlay_sysfs_attrs,
-};
-
-int dss_overlay_kobj_init(struct omap_overlay *ovl,
-		struct platform_device *pdev)
-{
-	return kobject_init_and_add(&ovl->kobj, &overlay_ktype,
-			&pdev->dev.kobj, "overlay%d", ovl->id);
-}
-
-void dss_overlay_kobj_uninit(struct omap_overlay *ovl)
-{
-	kobject_del(&ovl->kobj);
-	kobject_put(&ovl->kobj);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/overlay.c b/drivers/gpu/drm/omapdrm/dss/overlay.c
deleted file mode 100644
index 2f7cee9..0000000
--- a/drivers/gpu/drm/omapdrm/dss/overlay.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/overlay.c
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "OVERLAY"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/sysfs.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-
-static int num_overlays;
-static struct omap_overlay *overlays;
-
-int omap_dss_get_num_overlays(void)
-{
-	return num_overlays;
-}
-EXPORT_SYMBOL(omap_dss_get_num_overlays);
-
-struct omap_overlay *omap_dss_get_overlay(int num)
-{
-	if (num >= num_overlays)
-		return NULL;
-
-	return &overlays[num];
-}
-EXPORT_SYMBOL(omap_dss_get_overlay);
-
-void dss_init_overlays(struct platform_device *pdev)
-{
-	int i, r;
-
-	num_overlays = dss_feat_get_num_ovls();
-
-	overlays = kzalloc(sizeof(struct omap_overlay) * num_overlays,
-			GFP_KERNEL);
-
-	BUG_ON(overlays == NULL);
-
-	for (i = 0; i < num_overlays; ++i) {
-		struct omap_overlay *ovl = &overlays[i];
-
-		switch (i) {
-		case 0:
-			ovl->name = "gfx";
-			ovl->id = OMAP_DSS_GFX;
-			break;
-		case 1:
-			ovl->name = "vid1";
-			ovl->id = OMAP_DSS_VIDEO1;
-			break;
-		case 2:
-			ovl->name = "vid2";
-			ovl->id = OMAP_DSS_VIDEO2;
-			break;
-		case 3:
-			ovl->name = "vid3";
-			ovl->id = OMAP_DSS_VIDEO3;
-			break;
-		}
-
-		ovl->caps = dss_feat_get_overlay_caps(ovl->id);
-		ovl->supported_modes =
-			dss_feat_get_supported_color_modes(ovl->id);
-
-		r = dss_overlay_kobj_init(ovl, pdev);
-		if (r)
-			DSSERR("failed to create sysfs file\n");
-	}
-}
-
-void dss_uninit_overlays(struct platform_device *pdev)
-{
-	int i;
-
-	for (i = 0; i < num_overlays; ++i) {
-		struct omap_overlay *ovl = &overlays[i];
-		dss_overlay_kobj_uninit(ovl);
-	}
-
-	kfree(overlays);
-	overlays = NULL;
-	num_overlays = 0;
-}
-
-int dss_ovl_simple_check(struct omap_overlay *ovl,
-		const struct omap_overlay_info *info)
-{
-	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
-		if (info->out_width != 0 && info->width != info->out_width) {
-			DSSERR("check_overlay: overlay %d doesn't support "
-					"scaling\n", ovl->id);
-			return -EINVAL;
-		}
-
-		if (info->out_height != 0 && info->height != info->out_height) {
-			DSSERR("check_overlay: overlay %d doesn't support "
-					"scaling\n", ovl->id);
-			return -EINVAL;
-		}
-	}
-
-	if ((ovl->supported_modes & info->color_mode) == 0) {
-		DSSERR("check_overlay: overlay %d doesn't support mode %d\n",
-				ovl->id, info->color_mode);
-		return -EINVAL;
-	}
-
-	if (info->zorder >= omap_dss_get_num_overlays()) {
-		DSSERR("check_overlay: zorder %d too high\n", info->zorder);
-		return -EINVAL;
-	}
-
-	if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
-		DSSERR("check_overlay: rotation type %d not supported\n",
-				info->rotation_type);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
-		const struct omap_video_timings *mgr_timings)
-{
-	u16 outw, outh;
-	u16 dw, dh;
-
-	dw = mgr_timings->x_res;
-	dh = mgr_timings->y_res;
-
-	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
-		outw = info->width;
-		outh = info->height;
-	} else {
-		if (info->out_width == 0)
-			outw = info->width;
-		else
-			outw = info->out_width;
-
-		if (info->out_height == 0)
-			outh = info->height;
-		else
-			outh = info->out_height;
-	}
-
-	if (dw < info->pos_x + outw) {
-		DSSERR("overlay %d horizontally not inside the display area "
-				"(%d + %d >= %d)\n",
-				ovl->id, info->pos_x, outw, dw);
-		return -EINVAL;
-	}
-
-	if (dh < info->pos_y + outh) {
-		DSSERR("overlay %d vertically not inside the display area "
-				"(%d + %d >= %d)\n",
-				ovl->id, info->pos_y, outh, dh);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/*
- * Checks if replication logic should be used. Only use when overlay is in
- * RGB12U or RGB16 mode, and video port width interface is 18bpp or 24bpp
- */
-bool dss_ovl_use_replication(struct dss_lcd_mgr_config config,
-		enum omap_color_mode mode)
-{
-	if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16)
-		return false;
-
-	return config.video_port_width > 16;
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index aea6a1d..3796576 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -880,7 +880,7 @@
 	struct omap_dss_device *out = &rfbi.output;
 	int r;
 
-	if (out->manager == NULL) {
+	if (!out->dispc_channel_connected) {
 		DSSERR("failed to enable display: no output/manager\n");
 		return -ENODEV;
 	}
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index d747cc6..cd6d3bf 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -114,7 +114,7 @@
 
 static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
 {
-	struct omap_overlay_manager *mgr = sdi.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 
 	sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
 
@@ -124,19 +124,20 @@
 	sdi.mgr_config.video_port_width = 24;
 	sdi.mgr_config.lcden_sig_polarity = 1;
 
-	dss_mgr_set_lcd_config(mgr, &sdi.mgr_config);
+	dss_mgr_set_lcd_config(channel, &sdi.mgr_config);
 }
 
 static int sdi_display_enable(struct omap_dss_device *dssdev)
 {
 	struct omap_dss_device *out = &sdi.output;
+	enum omap_channel channel = dssdev->dispc_channel;
 	struct omap_video_timings *t = &sdi.timings;
 	unsigned long fck;
 	struct dispc_clock_info dispc_cinfo;
 	unsigned long pck;
 	int r;
 
-	if (out->manager == NULL) {
+	if (!out->dispc_channel_connected) {
 		DSSERR("failed to enable display: no output/manager\n");
 		return -ENODEV;
 	}
@@ -169,7 +170,7 @@
 	}
 
 
-	dss_mgr_set_timings(out->manager, t);
+	dss_mgr_set_timings(channel, t);
 
 	r = dss_set_fck_rate(fck);
 	if (r)
@@ -188,7 +189,7 @@
 	 * need to care about the shadow register mechanism for pck-free. The
 	 * exact reason for this is unknown.
 	 */
-	dispc_mgr_set_clock_div(out->manager->id, &sdi.mgr_config.clock_info);
+	dispc_mgr_set_clock_div(channel, &sdi.mgr_config.clock_info);
 
 	dss_sdi_init(sdi.datapairs);
 	r = dss_sdi_enable();
@@ -196,7 +197,7 @@
 		goto err_sdi_enable;
 	mdelay(2);
 
-	r = dss_mgr_enable(out->manager);
+	r = dss_mgr_enable(channel);
 	if (r)
 		goto err_mgr_enable;
 
@@ -216,9 +217,9 @@
 
 static void sdi_display_disable(struct omap_dss_device *dssdev)
 {
-	struct omap_overlay_manager *mgr = sdi.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 
-	dss_mgr_disable(mgr);
+	dss_mgr_disable(channel);
 
 	dss_sdi_disable();
 
@@ -242,9 +243,9 @@
 static int sdi_check_timings(struct omap_dss_device *dssdev,
 			struct omap_video_timings *timings)
 {
-	struct omap_overlay_manager *mgr = sdi.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 
-	if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
+	if (!dispc_mgr_timings_ok(channel, timings))
 		return -EINVAL;
 
 	if (timings->pixelclock == 0)
@@ -280,18 +281,14 @@
 static int sdi_connect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
-	struct omap_overlay_manager *mgr;
+	enum omap_channel channel = dssdev->dispc_channel;
 	int r;
 
 	r = sdi_init_regulator();
 	if (r)
 		return r;
 
-	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
-	if (!mgr)
-		return -ENODEV;
-
-	r = dss_mgr_connect(mgr, dssdev);
+	r = dss_mgr_connect(channel, dssdev);
 	if (r)
 		return r;
 
@@ -299,7 +296,7 @@
 	if (r) {
 		DSSERR("failed to connect output to new device: %s\n",
 				dst->name);
-		dss_mgr_disconnect(mgr, dssdev);
+		dss_mgr_disconnect(channel, dssdev);
 		return r;
 	}
 
@@ -309,6 +306,8 @@
 static void sdi_disconnect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
+	enum omap_channel channel = dssdev->dispc_channel;
+
 	WARN_ON(dst != dssdev->dst);
 
 	if (dst != dssdev->dst)
@@ -316,8 +315,7 @@
 
 	omapdss_output_unset_device(dssdev);
 
-	if (dssdev->manager)
-		dss_mgr_disconnect(dssdev->manager, dssdev);
+	dss_mgr_disconnect(channel, dssdev);
 }
 
 static const struct omapdss_sdi_ops sdi_ops = {
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 08f9def..08a2cc7 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -443,7 +443,7 @@
 
 static int venc_power_on(struct omap_dss_device *dssdev)
 {
-	struct omap_overlay_manager *mgr = venc.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 	u32 l;
 	int r;
 
@@ -469,13 +469,13 @@
 
 	venc_write_reg(VENC_OUTPUT_CONTROL, l);
 
-	dss_mgr_set_timings(mgr, &venc.timings);
+	dss_mgr_set_timings(channel, &venc.timings);
 
 	r = regulator_enable(venc.vdda_dac_reg);
 	if (r)
 		goto err1;
 
-	r = dss_mgr_enable(mgr);
+	r = dss_mgr_enable(channel);
 	if (r)
 		goto err2;
 
@@ -494,12 +494,12 @@
 
 static void venc_power_off(struct omap_dss_device *dssdev)
 {
-	struct omap_overlay_manager *mgr = venc.output.manager;
+	enum omap_channel channel = dssdev->dispc_channel;
 
 	venc_write_reg(VENC_OUTPUT_CONTROL, 0);
 	dss_set_dac_pwrdn_bgz(0);
 
-	dss_mgr_disable(mgr);
+	dss_mgr_disable(channel);
 
 	regulator_disable(venc.vdda_dac_reg);
 
@@ -515,7 +515,7 @@
 
 	mutex_lock(&venc.venc_lock);
 
-	if (out->manager == NULL) {
+	if (!out->dispc_channel_connected) {
 		DSSERR("Failed to enable display: no output/manager\n");
 		r = -ENODEV;
 		goto err0;
@@ -742,18 +742,14 @@
 static int venc_connect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
-	struct omap_overlay_manager *mgr;
+	enum omap_channel channel = dssdev->dispc_channel;
 	int r;
 
 	r = venc_init_regulator();
 	if (r)
 		return r;
 
-	mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
-	if (!mgr)
-		return -ENODEV;
-
-	r = dss_mgr_connect(mgr, dssdev);
+	r = dss_mgr_connect(channel, dssdev);
 	if (r)
 		return r;
 
@@ -761,7 +757,7 @@
 	if (r) {
 		DSSERR("failed to connect output to new device: %s\n",
 				dst->name);
-		dss_mgr_disconnect(mgr, dssdev);
+		dss_mgr_disconnect(channel, dssdev);
 		return r;
 	}
 
@@ -771,6 +767,8 @@
 static void venc_disconnect(struct omap_dss_device *dssdev,
 		struct omap_dss_device *dst)
 {
+	enum omap_channel channel = dssdev->dispc_channel;
+
 	WARN_ON(dst != dssdev->dst);
 
 	if (dst != dssdev->dst)
@@ -778,8 +776,7 @@
 
 	omapdss_output_unset_device(dssdev);
 
-	if (dssdev->manager)
-		dss_mgr_disconnect(dssdev->manager, dssdev);
+	dss_mgr_disconnect(channel, dssdev);
 }
 
 static const struct omapdss_atv_ops venc_ops = {
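
sdi.c and venc.c (and the rfbi.c hunk above) receive the same mechanical
conversion: cache channel = dssdev->dispc_channel, test
out->dispc_channel_connected where out->manager used to be tested, and
hand the channel to the dss_mgr_*() shims. The connect path in both
drivers condenses to the shape below; output_connect() is a composite of
the sdi_connect()/venc_connect() hunks, not a function in the tree, and
omapdss_output_set_device() is the existing helper hidden in the
unchanged context lines:

	static int output_connect(struct omap_dss_device *dssdev,
				  struct omap_dss_device *dst)
	{
		enum omap_channel channel = dssdev->dispc_channel;
		int r;

		r = dss_mgr_connect(channel, dssdev);
		if (r)
			return r;

		r = omapdss_output_set_device(dssdev, dst);
		if (r) {
			DSSERR("failed to connect output to new device: %s\n",
					dst->name);
			dss_mgr_disconnect(channel, dssdev);
			return r;
		}

		return 0;
	}
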
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 83f2a91..ce2d67b 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -63,6 +63,9 @@
 	if (timings->interlace)
 		mode->flags |= DRM_MODE_FLAG_INTERLACE;
 
+	if (timings->double_pixel)
+		mode->flags |= DRM_MODE_FLAG_DBLCLK;
+
 	if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
 		mode->flags |= DRM_MODE_FLAG_PHSYNC;
 	else
@@ -90,6 +93,7 @@
 	timings->vbp = mode->vtotal - mode->vsync_end;
 
 	timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+	timings->double_pixel = !!(mode->flags & DRM_MODE_FLAG_DBLCLK);
 
 	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
 		timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
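
omap_connector now round-trips pixel-doubled modes through the standard
DRM flag: the omapdss-to-DRM helper sets DRM_MODE_FLAG_DBLCLK from
timings->double_pixel and the reverse helper recovers it, mirroring the
existing DRM_MODE_FLAG_INTERLACE handling. A minimal model of the
two-way mapping (the flag value is the one from drm_mode.h):

	#include <stdbool.h>

	#define DRM_MODE_FLAG_DBLCLK	(1 << 12)

	struct timings { bool double_pixel; };

	static unsigned int omap_to_drm_flags(const struct timings *t)
	{
		return t->double_pixel ? DRM_MODE_FLAG_DBLCLK : 0;
	}

	static void drm_to_omap(struct timings *t, unsigned int flags)
	{
		t->double_pixel = !!(flags & DRM_MODE_FLAG_DBLCLK);
	}
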
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 2ed0754..075f2bb 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -34,14 +34,6 @@
 	const char *name;
 	enum omap_channel channel;
 
-	/*
-	 * Temporary: eventually this will go away, but it is needed
-	 * for now to keep the output's happy.  (They only need
-	 * mgr->id.)  Eventually this will be replaced w/ something
-	 * more common-panel-framework-y
-	 */
-	struct omap_overlay_manager *mgr;
-
 	struct omap_video_timings timings;
 
 	struct omap_drm_irq vblank_irq;
@@ -80,9 +72,13 @@
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
+	/*
+	 * Timeout is set to a "sufficiently" high value, which should cover
+	 * a single frame refresh even on slower displays.
+	 */
 	return wait_event_timeout(omap_crtc->pending_wait,
 				  !omap_crtc->pending,
-				  msecs_to_jiffies(50));
+				  msecs_to_jiffies(250));
 }
 
 /* -----------------------------------------------------------------------------
@@ -100,31 +96,32 @@
 
 /* ovl-mgr-id -> crtc */
 static struct omap_crtc *omap_crtcs[8];
+static struct omap_dss_device *omap_crtc_output[8];
 
 /* we can probably ignore these until we support command-mode panels: */
-static int omap_crtc_dss_connect(struct omap_overlay_manager *mgr,
+static int omap_crtc_dss_connect(enum omap_channel channel,
 		struct omap_dss_device *dst)
 {
-	if (mgr->output)
+	if (omap_crtc_output[channel])
 		return -EINVAL;
 
-	if ((mgr->supported_outputs & dst->id) == 0)
+	if ((dispc_mgr_get_supported_outputs(channel) & dst->id) == 0)
 		return -EINVAL;
 
-	dst->manager = mgr;
-	mgr->output = dst;
+	omap_crtc_output[channel] = dst;
+	dst->dispc_channel_connected = true;
 
 	return 0;
 }
 
-static void omap_crtc_dss_disconnect(struct omap_overlay_manager *mgr,
+static void omap_crtc_dss_disconnect(enum omap_channel channel,
 		struct omap_dss_device *dst)
 {
-	mgr->output->manager = NULL;
-	mgr->output = NULL;
+	omap_crtc_output[channel] = NULL;
+	dst->dispc_channel_connected = false;
 }
 
-static void omap_crtc_dss_start_update(struct omap_overlay_manager *mgr)
+static void omap_crtc_dss_start_update(enum omap_channel channel)
 {
 }
 
@@ -138,6 +135,11 @@
 	u32 framedone_irq, vsync_irq;
 	int ret;
 
+	if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) {
+		dispc_mgr_enable(channel, enable);
+		return;
+	}
+
 	if (dispc_mgr_is_enabled(channel) == enable)
 		return;
 
@@ -186,9 +188,9 @@
 }
 
 
-static int omap_crtc_dss_enable(struct omap_overlay_manager *mgr)
+static int omap_crtc_dss_enable(enum omap_channel channel)
 {
-	struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
+	struct omap_crtc *omap_crtc = omap_crtcs[channel];
 	struct omap_overlay_manager_info info;
 
 	memset(&info, 0, sizeof(info));
@@ -205,38 +207,38 @@
 	return 0;
 }
 
-static void omap_crtc_dss_disable(struct omap_overlay_manager *mgr)
+static void omap_crtc_dss_disable(enum omap_channel channel)
 {
-	struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
+	struct omap_crtc *omap_crtc = omap_crtcs[channel];
 
 	omap_crtc_set_enabled(&omap_crtc->base, false);
 }
 
-static void omap_crtc_dss_set_timings(struct omap_overlay_manager *mgr,
+static void omap_crtc_dss_set_timings(enum omap_channel channel,
 		const struct omap_video_timings *timings)
 {
-	struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
+	struct omap_crtc *omap_crtc = omap_crtcs[channel];
 	DBG("%s", omap_crtc->name);
 	omap_crtc->timings = *timings;
 }
 
-static void omap_crtc_dss_set_lcd_config(struct omap_overlay_manager *mgr,
+static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
 		const struct dss_lcd_mgr_config *config)
 {
-	struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
+	struct omap_crtc *omap_crtc = omap_crtcs[channel];
 	DBG("%s", omap_crtc->name);
 	dispc_mgr_set_lcd_config(omap_crtc->channel, config);
 }
 
 static int omap_crtc_dss_register_framedone(
-		struct omap_overlay_manager *mgr,
+		enum omap_channel channel,
 		void (*handler)(void *), void *data)
 {
 	return 0;
 }
 
 static void omap_crtc_dss_unregister_framedone(
-		struct omap_overlay_manager *mgr,
+		enum omap_channel channel,
 		void (*handler)(void *), void *data)
 {
 }
@@ -269,18 +271,7 @@
 		return;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-
-	list_del(&event->base.link);
-
-	/*
-	 * Queue the event for delivery if it's still linked to a file
-	 * handle, otherwise just destroy it.
-	 */
-	if (event->base.file_priv)
-		drm_crtc_send_vblank_event(crtc, event);
-	else
-		event->base.destroy(&event->base);
-
+	drm_crtc_send_vblank_event(crtc, event);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -341,13 +332,6 @@
 	kfree(omap_crtc);
 }
 
-static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void omap_crtc_enable(struct drm_crtc *crtc)
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -414,24 +398,40 @@
 	}
 }
 
+static bool omap_crtc_is_plane_prop(struct drm_device *dev,
+	struct drm_property *property)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+
+	return property == priv->zorder_prop ||
+		property == dev->mode_config.rotation_property;
+}
+
 static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
 					 struct drm_crtc_state *state,
 					 struct drm_property *property,
 					 uint64_t val)
 {
-	struct drm_plane_state *plane_state;
-	struct drm_plane *plane = crtc->primary;
+	struct drm_device *dev = crtc->dev;
 
-	/*
-	 * Delegate property set to the primary plane. Get the plane state and
-	 * set the property directly.
-	 */
+	if (omap_crtc_is_plane_prop(dev, property)) {
+		struct drm_plane_state *plane_state;
+		struct drm_plane *plane = crtc->primary;
 
-	plane_state = drm_atomic_get_plane_state(state->state, plane);
-	if (!plane_state)
-		return -EINVAL;
+		/*
+		 * Delegate property set to the primary plane. Get the plane
+		 * state and set the property directly.
+		 */
 
-	return drm_atomic_plane_set_property(plane, plane_state, property, val);
+		plane_state = drm_atomic_get_plane_state(state->state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+
+		return drm_atomic_plane_set_property(plane, plane_state,
+				property, val);
+	}
+
+	return -EINVAL;
 }
 
 static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
@@ -439,14 +439,20 @@
 					 struct drm_property *property,
 					 uint64_t *val)
 {
-	/*
-	 * Delegate property get to the primary plane. The
-	 * drm_atomic_plane_get_property() function isn't exported, but can be
-	 * called through drm_object_property_get_value() as that will call
-	 * drm_atomic_get_property() for atomic drivers.
-	 */
-	return drm_object_property_get_value(&crtc->primary->base, property,
-					     val);
+	struct drm_device *dev = crtc->dev;
+
+	if (omap_crtc_is_plane_prop(dev, property)) {
+		/*
+		 * Delegate property get to the primary plane. The
+		 * drm_atomic_plane_get_property() function isn't exported, but
+		 * can be called through drm_object_property_get_value() as that
+		 * will call drm_atomic_get_property() for atomic drivers.
+		 */
+		return drm_object_property_get_value(&crtc->primary->base,
+				property, val);
+	}
+
+	return -EINVAL;
 }
 
 static const struct drm_crtc_funcs omap_crtc_funcs = {
@@ -462,7 +468,6 @@
 };
 
 static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
-	.mode_fixup = omap_crtc_mode_fixup,
 	.mode_set_nofb = omap_crtc_mode_set_nofb,
 	.disable = omap_crtc_disable,
 	.enable = omap_crtc_enable,
@@ -520,9 +525,6 @@
 	omap_crtc->error_irq.irq = omap_crtc_error_irq;
 	omap_irq_register(dev, &omap_crtc->error_irq);
 
-	/* temporary: */
-	omap_crtc->mgr = omap_dss_get_overlay_manager(channel);
-
 	ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
 					&omap_crtc_funcs, NULL);
 	if (ret < 0) {
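
Beyond dropping the temporary mgr pointer and indexing outputs by
channel, the omap_crtc hunks fix a latent bug in the property
delegation: drm_atomic_get_plane_state() returns an ERR_PTR()-encoded
pointer and never NULL, so the old "if (!plane_state) return -EINVAL;"
check could never fire and an error pointer would have been handed to
drm_atomic_plane_set_property(). The replacement uses the kernel's
IS_ERR()/PTR_ERR() idiom, modelled stand-alone here:

	/* User-space model of the kernel's ERR_PTR() error-pointer idiom. */
	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		/* errors occupy the top MAX_ERRNO values of the address space */
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

Checking such a return value with "!ptr" treats every failure as
success, which is exactly what the replaced check did.
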
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 85dfe36..de275a5 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -79,6 +79,16 @@
 			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
 };
 
+static u32 dmm_read(struct dmm *dmm, u32 reg)
+{
+	return readl(dmm->base + reg);
+}
+
+static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
+{
+	writel(val, dmm->base + reg);
+}
+
 /* simple allocator to grab next 16 byte aligned memory from txn */
 static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
 {
@@ -108,7 +118,7 @@
 
 	i = DMM_FIXED_RETRY_COUNT;
 	while (true) {
-		r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
 		err = r & DMM_PATSTATUS_ERR;
 		if (err)
 			return -EFAULT;
@@ -140,11 +150,11 @@
 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 {
 	struct dmm *dmm = arg;
-	uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+	uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
 	int i;
 
 	/* ack IRQ */
-	writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
 
 	for (i = 0; i < dmm->num_engines; i++) {
 		if (status & DMM_IRQSTAT_LST) {
@@ -264,7 +274,7 @@
 	txn->last_pat->next_pa = 0;
 
 	/* write to PAT_DESCR to clear out any pending transaction */
-	writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
 
 	/* wait for engine ready: */
 	ret = wait_status(engine, DMM_PATSTATUS_READY);
@@ -280,8 +290,7 @@
 	smp_mb();
 
 	/* kick reload */
-	writel(engine->refill_pa,
-		dmm->base + reg[PAT_DESCR][engine->id]);
+	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
 
 	if (wait) {
 		if (!wait_for_completion_timeout(&engine->compl,
@@ -309,6 +318,21 @@
 	struct tcm_area slice, area_s;
 	struct dmm_txn *txn;
 
+	/*
+	 * FIXME
+	 *
+	 * Asynchronous fill does not work reliably, as the driver does not
+	 * handle errors in the async code paths. The fill operation may
+	 * silently fail, leaking DMM engines, which may eventually lead to
+	 * deadlock if we run out of engines.
+	 *
+	 * For now, always set 'wait' so that only sync fills are used. Async
+	 * fills should either be fixed, or we could decide to support sync
+	 * fills only and remove the whole async code path.
+	 */
+
+	wait = true;
+
 	txn = dmm_txn_init(omap_dmm, area->tcm);
 	if (IS_ERR_OR_NULL(txn))
 		return -ENOMEM;
@@ -641,7 +665,7 @@
 
 	omap_dmm->dev = &dev->dev;
 
-	hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
 	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
 	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
 	omap_dmm->container_width = 256;
@@ -650,7 +674,7 @@
 	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
 
 	/* read out actual LUT width and height */
-	pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
 	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
 	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
 
@@ -660,12 +684,12 @@
 		omap_dmm->num_lut++;
 
 	/* initialize DMM registers */
-	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
-	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
-	writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
-	writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
-	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
-	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
+	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
+	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
+	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
+	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
+	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
 
 	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
 				"omap_dmm_irq_handler", omap_dmm);
@@ -683,7 +707,7 @@
 	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
 	 * we just generally don't care about.
 	 */
-	writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
 
 	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
 	if (!omap_dmm->dummy_page) {
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index dfafdb6..80398a6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -142,7 +142,6 @@
 {
 	struct omap_drm_private *priv = dev->dev_private;
 	struct omap_atomic_state_commit *commit;
-	unsigned long flags;
 	unsigned int i;
 	int ret;
 
@@ -175,17 +174,6 @@
 	priv->commit.pending |= commit->crtcs;
 	spin_unlock(&priv->commit.lock);
 
-	/* Keep track of all CRTC events to unlink them in preclose(). */
-	spin_lock_irqsave(&dev->event_lock, flags);
-	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
-		struct drm_crtc_state *cstate = state->crtc_states[i];
-
-		if (cstate && cstate->event)
-			list_add_tail(&cstate->event->base.link,
-				      &priv->commit.events);
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-
 	/* Swap the state, this is the point of no return. */
 	drm_atomic_helper_swap_state(dev, state);
 
@@ -352,7 +340,7 @@
 		struct drm_connector *connector;
 		struct drm_encoder *encoder;
 		enum omap_channel channel;
-		struct omap_overlay_manager *mgr;
+		struct omap_dss_device *out;
 
 		if (!omapdss_device_is_connected(dssdev))
 			continue;
@@ -399,8 +387,10 @@
 		 * not considered.
 		 */
 
-		mgr = omapdss_find_mgr_from_display(dssdev);
-		channel = mgr->id;
+		out = omapdss_find_output_from_display(dssdev);
+		channel = out->dispc_channel;
+		omap_dss_put_device(out);
+
 		/*
 		 * if this channel hasn't already been taken by a previously
 		 * allocated crtc, we create a new crtc for it
@@ -673,7 +663,6 @@
 	priv->wq = alloc_ordered_workqueue("omapdrm", 0);
 	init_waitqueue_head(&priv->commit.wait);
 	spin_lock_init(&priv->commit.lock);
-	INIT_LIST_HEAD(&priv->commit.events);
 
 	spin_lock_init(&priv->list_lock);
 	INIT_LIST_HEAD(&priv->obj_list);
@@ -787,33 +776,6 @@
 	}
 }
 
-static void dev_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct omap_drm_private *priv = dev->dev_private;
-	struct drm_pending_event *event;
-	unsigned long flags;
-
-	DBG("preclose: dev=%p", dev);
-
-	/*
-	 * Unlink all pending CRTC events to make sure they won't be queued up
-	 * by a pending asynchronous commit.
-	 */
-	spin_lock_irqsave(&dev->event_lock, flags);
-	list_for_each_entry(event, &priv->commit.events, link) {
-		if (event->file_priv == file) {
-			file->event_space += event->event->length;
-			event->file_priv = NULL;
-		}
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void dev_postclose(struct drm_device *dev, struct drm_file *file)
-{
-	DBG("postclose: dev=%p, file=%p", dev, file);
-}
-
 static const struct vm_operations_struct omap_gem_vm_ops = {
 	.fault = omap_gem_fault,
 	.open = drm_gem_vm_open,
@@ -838,8 +800,6 @@
 	.unload = dev_unload,
 	.open = dev_open,
 	.lastclose = dev_lastclose,
-	.preclose = dev_preclose,
-	.postclose = dev_postclose,
 	.set_busid = drm_platform_set_busid,
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.enable_vblank = omap_irq_enable_vblank,
@@ -900,12 +860,52 @@
 }
 
 #ifdef CONFIG_PM_SLEEP
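+/*
+ * Disable all active displays for suspend, remembering which ones were
+ * enabled so that omap_drm_resume_all_displays() can turn them back on.
+ */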
+static int omap_drm_suspend_all_displays(void)
+{
+	struct omap_dss_device *dssdev = NULL;
+
+	for_each_dss_dev(dssdev) {
+		if (!dssdev->driver)
+			continue;
+
+		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+			dssdev->driver->disable(dssdev);
+			dssdev->activate_after_resume = true;
+		} else {
+			dssdev->activate_after_resume = false;
+		}
+	}
+
+	return 0;
+}
+
+static int omap_drm_resume_all_displays(void)
+{
+	struct omap_dss_device *dssdev = NULL;
+
+	for_each_dss_dev(dssdev) {
+		if (!dssdev->driver)
+			continue;
+
+		if (dssdev->activate_after_resume) {
+			dssdev->driver->enable(dssdev);
+			dssdev->activate_after_resume = false;
+		}
+	}
+
+	return 0;
+}
+
 static int omap_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
 	drm_kms_helper_poll_disable(drm_dev);
 
+	drm_modeset_lock_all(drm_dev);
+	omap_drm_suspend_all_displays();
+	drm_modeset_unlock_all(drm_dev);
+
 	return 0;
 }
 
@@ -913,6 +913,10 @@
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
+	drm_modeset_lock_all(drm_dev);
+	omap_drm_resume_all_displays();
+	drm_modeset_unlock_all(drm_dev);
+
 	drm_kms_helper_poll_enable(drm_dev);
 
 	return omap_gem_resume(dev);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 9e00307..0fbe17d 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -31,6 +31,8 @@
 #include <drm/drm_gem.h>
 #include <drm/omap_drm.h>
 
+#include "dss/omapdss.h"
+
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
 
@@ -106,7 +108,6 @@
 
 	/* atomic commit */
 	struct {
-		struct list_head events;
 		wait_queue_head_t wait;
 		u32 pending;
 		spinlock_t lock;	/* Protects commit.pending */
@@ -189,12 +190,15 @@
 		struct omap_drm_window *win, struct omap_overlay_info *info);
 struct drm_connector *omap_framebuffer_get_next_connector(
 		struct drm_framebuffer *fb, struct drm_connector *from);
+bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
 
 void omap_gem_init(struct drm_device *dev);
 void omap_gem_deinit(struct drm_device *dev);
 
 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 		union omap_gem_size gsize, uint32_t flags);
+struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+		struct sg_table *sgt);
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
 void omap_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 61714e96..0bbb9c5 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -139,11 +139,16 @@
 	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
 	struct omap_dss_device *dssdev = omap_encoder->dssdev;
 	struct omap_dss_driver *dssdrv = dssdev->driver;
+	int r;
 
 	omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc),
 			    omap_crtc_timings(encoder->crtc));
 
-	dssdrv->enable(dssdev);
+	r = dssdrv->enable(dssdev);
+	if (r)
+		dev_err(encoder->dev->dev,
+			"Failed to enable display '%s': %d\n",
+			dssdev->name, r);
 }
 
 static int omap_encoder_atomic_check(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index ad202df..6109623 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -145,6 +145,14 @@
 	return plane->paddr + offset;
 }
 
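+/* Rotation is only supported for framebuffers backed by TILED buffer objects. */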
+bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
+{
+	struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+	struct plane *plane = &omap_fb->planes[0];
+
+	return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
+}
+
 /* update ovl info for scanout, handles cases of multi-planar fb's, etc.
  */
 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
@@ -449,6 +457,14 @@
 			goto fail;
 		}
 
+		if (i > 0 && pitch != mode_cmd->pitches[i - 1]) {
+			dev_err(dev->dev,
+				"pitches are not the same between framebuffer planes %d != %d\n",
+				pitch, mode_cmd->pitches[i - 1]);
+			ret = -EINVAL;
+			goto fail;
+		}
+
 		plane->bo     = bos[i];
 		plane->offset = mode_cmd->offsets[i];
 		plane->pitch  = pitch;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 359b0d7..907154f 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -31,9 +31,9 @@
  */
 
 /* note: we use upper 8 bits of flags for driver-internal flags: */
-#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
-#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
-#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */
+#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
+#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
+#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
 
 struct omap_gem_object {
 	struct drm_gem_object base;
@@ -49,17 +49,25 @@
 	uint32_t roll;
 
 	/**
-	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
-	 * is set and the paddr is valid.  Also if the buffer is remapped in
-	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
-	 * the physical address and OMAP_BO_DMA is not set, then you should
-	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
-	 * not removed from under your feet.
+	 * paddr contains the buffer DMA address. It is valid for
 	 *
-	 * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
-	 * buffer is requested, but doesn't mean that it is.  Use the
-	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
-	 * physical address.
+	 * - buffers allocated through the DMA mapping API (with the
+	 *   OMAP_BO_MEM_DMA_API flag set)
+	 *
+	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
+	 *   if they are physically contiguous (when sgt->orig_nents == 1)
+	 *
+	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
+	 *   which case the DMA address points to the TILER aperture
+	 *
+	 * Physically contiguous buffers have their DMA address equal to the
+	 * physical address as we don't remap those buffers through the TILER.
+	 *
+	 * Buffers mapped to the TILER have their DMA address pointing to the
+	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
+	 * the DMA address must be accessed through omap_gem_get_paddr() to
+	 * ensure that the mapping won't disappear unexpectedly. References must
+	 * be released with omap_gem_put_paddr().
 	 */
 	dma_addr_t paddr;
 
@@ -69,6 +77,12 @@
 	uint32_t paddr_cnt;
 
 	/**
+	 * If the buffer has been imported from a dmabuf, the OMAP_BO_MEM_DMABUF
+	 * flag is set and the sgt field is valid.
+	 */
+	struct sg_table *sgt;
+
+	/**
 	 * tiler block used when buffer is remapped in DMM/TILER.
 	 */
 	struct tiler_block *block;
@@ -91,17 +105,7 @@
 	 * sync-object allocated on demand (if needed)
 	 *
 	 * Per-buffer sync-object for tracking pending and completed hw/dma
-	 * read and write operations.  The layout in memory is dictated by
-	 * the SGX firmware, which uses this information to stall the command
-	 * stream if a surface is not ready yet.
-	 *
-	 * Note that when buffer is used by SGX, the sync-object needs to be
-	 * allocated from a special heap of sync-objects.  This way many sync
-	 * objects can be packed in a page, and not waste GPU virtual address
-	 * space.  Because of this we have to have a omap_gem_set_sync_object()
-	 * API to allow replacement of the syncobj after it has (potentially)
-	 * already been allocated.  A bit ugly but I haven't thought of a
-	 * better alternative.
+	 * read and write operations.
 	 */
 	struct {
 		uint32_t write_pending;
@@ -166,16 +170,15 @@
 	return drm_vma_node_offset_addr(&obj->vma_node);
 }
 
-/* GEM objects can either be allocated from contiguous memory (in which
- * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
- * contiguous buffers can be remapped in TILER/DMM if they need to be
- * contiguous... but we don't do this all the time to reduce pressure
- * on TILER/DMM space when we know at allocation time that the buffer
- * will need to be scanned out.
- */
-static inline bool is_shmem(struct drm_gem_object *obj)
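+/*
+ * A GEM object is physically contiguous if it was allocated through the DMA
+ * mapping API, or if it was imported from a dmabuf whose scatterlist holds a
+ * single entry.
+ */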
+static bool is_contiguous(struct omap_gem_object *omap_obj)
 {
-	return obj->filp != NULL;
+	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
+		return true;
+
+	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
+		return true;
+
+	return false;
 }
 
 /* -----------------------------------------------------------------------------
@@ -264,6 +267,19 @@
 		for (i = 0; i < npages; i++) {
 			addrs[i] = dma_map_page(dev->dev, pages[i],
 					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+			if (dma_mapping_error(dev->dev, addrs[i])) {
+				dev_warn(dev->dev,
+					"%s: failed to map page\n", __func__);
+
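+				/* Unwind: unmap the pages mapped so far. */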
+				for (i = i - 1; i >= 0; --i) {
+					dma_unmap_page(dev->dev, addrs[i],
+						PAGE_SIZE, DMA_BIDIRECTIONAL);
+				}
+
+				ret = -ENOMEM;
+				goto free_addrs;
+			}
 		}
 	} else {
 		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
@@ -278,6 +294,8 @@
 
 	return 0;
 
+free_addrs:
+	kfree(addrs);
 free_pages:
 	drm_gem_put_pages(obj, pages, true, false);
 
@@ -292,7 +310,7 @@
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	int ret = 0;
 
-	if (is_shmem(obj) && !omap_obj->pages) {
+	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
 		ret = omap_gem_attach_pages(obj);
 		if (ret) {
 			dev_err(obj->dev->dev, "could not attach pages\n");
@@ -396,7 +414,7 @@
 		omap_gem_cpu_sync(obj, pgoff);
 		pfn = page_to_pfn(omap_obj->pages[pgoff]);
 	} else {
-		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+		BUG_ON(!is_contiguous(omap_obj));
 		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
 	}
 
@@ -560,6 +578,11 @@
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
 		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
 		return VM_FAULT_OOM;
@@ -728,7 +751,8 @@
 static inline bool is_cached_coherent(struct drm_gem_object *obj)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	return is_shmem(obj) &&
+
+	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
 		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
 }
 
@@ -761,9 +785,20 @@
 
 		for (i = 0; i < npages; i++) {
 			if (!omap_obj->addrs[i]) {
-				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
+				dma_addr_t addr;
+
+				addr = dma_map_page(dev->dev, pages[i], 0,
 						PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+				if (dma_mapping_error(dev->dev, addr)) {
+					dev_warn(dev->dev,
+						"%s: failed to map page\n",
+						__func__);
+					break;
+				}
+
 				dirty = true;
+				omap_obj->addrs[i] = addr;
 			}
 		}
 
@@ -787,7 +822,7 @@
 
 	mutex_lock(&obj->dev->struct_mutex);
 
-	if (remap && is_shmem(obj) && priv->has_dmm) {
+	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
 		if (omap_obj->paddr_cnt == 0) {
 			struct page **pages;
 			uint32_t npages = obj->size >> PAGE_SHIFT;
@@ -834,7 +869,7 @@
 		omap_obj->paddr_cnt++;
 
 		*paddr = omap_obj->paddr;
-	} else if (omap_obj->flags & OMAP_BO_DMA) {
+	} else if (is_contiguous(omap_obj)) {
 		*paddr = omap_obj->paddr;
 	} else {
 		ret = -EINVAL;
@@ -1138,20 +1173,6 @@
 	return ret;
 }
 
-/* it is a bit lame to handle updates in this sort of polling way, but
- * in case of PVR, the GPU can directly update read/write complete
- * values, and not really tell us which ones it updated.. this also
- * means that sync_lock is not quite sufficient.  So we'll need to
- * do something a bit better when it comes time to add support for
- * separate 2d hw..
- */
-void omap_gem_op_update(void)
-{
-	spin_lock(&sync_lock);
-	sync_op_update();
-	spin_unlock(&sync_lock);
-}
-
 /* mark the start of read and/or write operation */
 int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
 {
@@ -1219,7 +1240,7 @@
  * is currently blocked..  fxn() can be called from any context
  *
  * (TODO for now fxn is called back from whichever context calls
- * omap_gem_op_update().. but this could be better defined later
+ * omap_gem_op_finish().. but this could be better defined later
  * if needed)
  *
  * TODO more code in common w/ _sync()..
@@ -1261,50 +1282,10 @@
 	return 0;
 }
 
-/* special API so PVR can update the buffer to use a sync-object allocated
- * from it's sync-obj heap.  Only used for a newly allocated (from PVR's
- * perspective) sync-object, so we overwrite the new syncobj w/ values
- * from the already allocated syncobj (if there is one)
- */
-int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret = 0;
-
-	spin_lock(&sync_lock);
-
-	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
-		/* clearing a previously set syncobj */
-		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
-				  GFP_ATOMIC);
-		if (!syncobj) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
-		omap_obj->sync = syncobj;
-	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
-		/* replacing an existing syncobj */
-		if (omap_obj->sync) {
-			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
-			kfree(omap_obj->sync);
-		}
-		omap_obj->flags |= OMAP_BO_EXT_SYNC;
-		omap_obj->sync = syncobj;
-	}
-
-unlock:
-	spin_unlock(&sync_lock);
-	return ret;
-}
-
 /* -----------------------------------------------------------------------------
  * Constructor & Destructor
  */
 
-/* don't call directly.. called from GEM core when it is time to actually
- * free the object..
- */
 void omap_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
@@ -1324,22 +1305,23 @@
 	 */
 	WARN_ON(omap_obj->paddr_cnt > 0);
 
-	/* don't free externally allocated backing memory */
-	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
-		if (omap_obj->pages)
+	if (omap_obj->pages) {
+		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
+			kfree(omap_obj->pages);
+		else
 			omap_gem_detach_pages(obj);
-
-		if (!is_shmem(obj)) {
-			dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
-				    omap_obj->paddr);
-		} else if (omap_obj->vaddr) {
-			vunmap(omap_obj->vaddr);
-		}
 	}
 
-	/* don't free externally allocated syncobj */
-	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
-		kfree(omap_obj->sync);
+	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
+		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
+			    omap_obj->paddr);
+	} else if (omap_obj->vaddr) {
+		vunmap(omap_obj->vaddr);
+	} else if (obj->import_attach) {
+		drm_prime_gem_destroy(obj, omap_obj->sgt);
+	}
+
+	kfree(omap_obj->sync);
 
 	drm_gem_object_release(obj);
 
@@ -1357,84 +1339,160 @@
 	size_t size;
 	int ret;
 
+	/* Validate the flags and compute the memory and cache flags. */
 	if (flags & OMAP_BO_TILED) {
 		if (!priv->usergart) {
 			dev_err(dev->dev, "Tiled buffers require DMM\n");
 			return NULL;
 		}
 
-		/* tiled buffers are always shmem paged backed.. when they are
-		 * scanned out, they are remapped into DMM/TILER
+		/*
+	 * Tiled buffers are always backed by shmem pages. When they are
+		 * scanned out, they are remapped into DMM/TILER.
 		 */
 		flags &= ~OMAP_BO_SCANOUT;
+		flags |= OMAP_BO_MEM_SHMEM;
 
-		/* currently don't allow cached buffers.. there is some caching
-		 * stuff that needs to be handled better
+		/*
+		 * Currently don't allow cached buffers. There is some caching
+		 * stuff that needs to be handled better.
 		 */
 		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
 		flags |= tiler_get_cpu_cache_flags();
-
-		/* align dimensions to slot boundaries... */
-		tiler_align(gem2fmt(flags),
-				&gsize.tiled.width, &gsize.tiled.height);
-
-		/* ...and calculate size based on aligned dimensions */
-		size = tiler_size(gem2fmt(flags),
-				gsize.tiled.width, gsize.tiled.height);
-	} else {
-		size = PAGE_ALIGN(gsize.bytes);
+	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+		/*
+		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
+		 * tiled. However, to lower the pressure on memory allocation,
+		 * use contiguous memory only if no TILER is available.
+		 */
+		flags |= OMAP_BO_MEM_DMA_API;
+	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
+		/*
+		 * All other buffers not backed by dma_buf are shmem-backed.
+		 */
+		flags |= OMAP_BO_MEM_SHMEM;
 	}
 
+	/* Allocate and initialize the OMAP GEM object. */
 	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
 	if (!omap_obj)
 		return NULL;
 
 	obj = &omap_obj->base;
+	omap_obj->flags = flags;
 
-	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
-		/* attempt to allocate contiguous memory if we don't
-		 * have DMM for remappign discontiguous buffers
+	if (flags & OMAP_BO_TILED) {
+		/*
+		 * For tiled buffers align dimensions to slot boundaries and
+		 * calculate size based on aligned dimensions.
 		 */
-		omap_obj->vaddr =  dma_alloc_wc(dev->dev, size,
-						&omap_obj->paddr, GFP_KERNEL);
-		if (!omap_obj->vaddr) {
-			kfree(omap_obj);
+		tiler_align(gem2fmt(flags), &gsize.tiled.width,
+			    &gsize.tiled.height);
 
-			return NULL;
-		}
+		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
+				  gsize.tiled.height);
 
-		flags |= OMAP_BO_DMA;
+		omap_obj->width = gsize.tiled.width;
+		omap_obj->height = gsize.tiled.height;
+	} else {
+		size = PAGE_ALIGN(gsize.bytes);
+	}
+
+	/* Initialize the GEM object. */
+	if (!(flags & OMAP_BO_MEM_SHMEM)) {
+		drm_gem_private_object_init(dev, obj, size);
+	} else {
+		ret = drm_gem_object_init(dev, obj, size);
+		if (ret)
+			goto err_free;
+
+		mapping = file_inode(obj->filp)->i_mapping;
+		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
+	}
+
+	/* Allocate memory if needed. */
+	if (flags & OMAP_BO_MEM_DMA_API) {
+		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
+					       &omap_obj->paddr,
+					       GFP_KERNEL);
+		if (!omap_obj->vaddr)
+			goto err_release;
 	}
 
 	spin_lock(&priv->list_lock);
 	list_add(&omap_obj->mm_list, &priv->obj_list);
 	spin_unlock(&priv->list_lock);
 
-	omap_obj->flags = flags;
-
-	if (flags & OMAP_BO_TILED) {
-		omap_obj->width = gsize.tiled.width;
-		omap_obj->height = gsize.tiled.height;
-	}
-
-	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
-		drm_gem_private_object_init(dev, obj, size);
-	} else {
-		ret = drm_gem_object_init(dev, obj, size);
-		if (ret)
-			goto fail;
-
-		mapping = file_inode(obj->filp)->i_mapping;
-		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
-	}
-
 	return obj;
 
-fail:
-	omap_gem_free_object(obj);
+err_release:
+	drm_gem_object_release(obj);
+err_free:
+	kfree(omap_obj);
 	return NULL;
 }
 
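+/*
+ * Wrap an imported dmabuf scatterlist in a new GEM object. Single-entry
+ * scatterlists use the DMA address directly; otherwise a pages array is
+ * built so the buffer can be remapped through the TILER.
+ */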
+struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+					   struct sg_table *sgt)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct omap_gem_object *omap_obj;
+	struct drm_gem_object *obj;
+	union omap_gem_size gsize;
+
+	/* Without a DMM, only physically contiguous buffers can be supported. */
+	if (sgt->orig_nents != 1 && !priv->has_dmm)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&dev->struct_mutex);
+
+	gsize.bytes = PAGE_ALIGN(size);
+	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
+	if (!obj) {
+		obj = ERR_PTR(-ENOMEM);
+		goto done;
+	}
+
+	omap_obj = to_omap_bo(obj);
+	omap_obj->sgt = sgt;
+
+	if (sgt->orig_nents == 1) {
+		omap_obj->paddr = sg_dma_address(sgt->sgl);
+	} else {
+		/* Build a pages array from the sgt. */
+		struct sg_page_iter iter;
+		struct page **pages;
+		unsigned int npages;
+		unsigned int i = 0;
+
+		npages = DIV_ROUND_UP(size, PAGE_SIZE);
+		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+		if (!pages) {
+			omap_gem_free_object(obj);
+			obj = ERR_PTR(-ENOMEM);
+			goto done;
+		}
+
+		omap_obj->pages = pages;
+
+		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+			/* Stop instead of writing past the end of the array. */
+			if (WARN_ON(i >= npages))
+				break;
+			pages[i++] = sg_page_iter_page(&iter);
+		}
+
+		if (WARN_ON(i != npages)) {
+			omap_gem_free_object(obj);
+			obj = ERR_PTR(-ENOMEM);
+			goto done;
+		}
+	}
+
+done:
+	mutex_unlock(&dev->struct_mutex);
+	return obj;
+}
+
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 27c2976..3cf8aab 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -21,6 +21,10 @@
 
 #include "omap_drv.h"
 
+/* -----------------------------------------------------------------------------
+ * DMABUF Export
+ */
+
 static struct sg_table *omap_gem_map_dma_buf(
 		struct dma_buf_attachment *attachment,
 		enum dma_data_direction dir)
@@ -79,7 +83,7 @@
 
 
 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
-		size_t start, size_t len, enum dma_data_direction dir)
+		enum dma_data_direction dir)
 {
 	struct drm_gem_object *obj = buffer->priv;
 	struct page **pages;
@@ -94,7 +98,7 @@
 }
 
 static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
-		size_t start, size_t len, enum dma_data_direction dir)
+		enum dma_data_direction dir)
 {
 	struct drm_gem_object *obj = buffer->priv;
 	omap_gem_put_pages(obj);
@@ -178,15 +182,20 @@
 	return dma_buf_export(&exp_info);
 }
 
-struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
-		struct dma_buf *buffer)
-{
-	struct drm_gem_object *obj;
+/* -----------------------------------------------------------------------------
+ * DMABUF Import
+ */
 
-	/* is this one of own objects? */
-	if (buffer->ops == &omap_dmabuf_ops) {
-		obj = buffer->priv;
-		/* is it from our device? */
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct drm_gem_object *obj;
+	struct sg_table *sgt;
+	int ret;
+
+	if (dma_buf->ops == &omap_dmabuf_ops) {
+		obj = dma_buf->priv;
 		if (obj->dev == dev) {
 			/*
 			 * Importing a dmabuf exported from our own GEM increases
@@ -197,9 +206,33 @@
 		}
 	}
 
-	/*
-	 * TODO add support for importing buffers from other devices..
-	 * for now we don't need this but would be nice to add eventually
-	 */
-	return ERR_PTR(-EINVAL);
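+	/*
+	 * Not one of our own buffers: attach to the dma-buf, map it and wrap
+	 * the resulting sg_table in a new GEM object.
+	 */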
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	get_dma_buf(dma_buf);
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		goto fail_detach;
+	}
+
+	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto fail_unmap;
+	}
+
+	obj->import_attach = attach;
+
+	return obj;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index d75b197..93ee538 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -177,6 +177,12 @@
 	if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
 		return -EINVAL;
 
+	if (state->fb) {
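+		/* Only framebuffers backed by TILED buffers support rotation. */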
+		if (state->rotation != BIT(DRM_ROTATE_0) &&
+		    !omap_framebuffer_supports_rotation(state->fb))
+			return -EINVAL;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f88a631..ceb2048 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -847,6 +847,7 @@
 	.vsync_end = 768 + 38 + 1,
 	.vtotal = 768 + 38 + 1 + 0,
 	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
 };
 
 static const struct panel_desc innolux_g121x1_l03 = {
@@ -982,6 +983,29 @@
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
 };
 
+static const struct drm_display_mode lg_lp120up1_mode = {
+	.clock = 162300,
+	.hdisplay = 1920,
+	.hsync_start = 1920 + 40,
+	.hsync_end = 1920 + 40 + 40,
+	.htotal = 1920 + 40 + 40 + 80,
+	.vdisplay = 1280,
+	.vsync_start = 1280 + 4,
+	.vsync_end = 1280 + 4 + 4,
+	.vtotal = 1280 + 4 + 4 + 12,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp120up1 = {
+	.modes = &lg_lp120up1_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 267,
+		.height = 183,
+	},
+};
+
 static const struct drm_display_mode lg_lp129qe_mode = {
 	.clock = 285250,
 	.hdisplay = 2560,
@@ -1016,6 +1040,7 @@
 	.vsync_end = 272 + 2 + 4,
 	.vtotal = 272 + 2 + 4 + 2,
 	.vrefresh = 74,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
 };
 
 static const struct panel_desc nec_nl4827hc19_05b = {
@@ -1176,6 +1201,42 @@
 	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct display_timing urt_umsh_8596md_timing = {
+	.pixelclock = { 33260000, 33260000, 33260000 },
+	.hactive = { 800, 800, 800 },
+	.hfront_porch = { 41, 41, 41 },
+	.hback_porch = { 216 - 128, 216 - 128, 216 - 128 },
+	.hsync_len = { 71, 128, 128 },
+	.vactive = { 480, 480, 480 },
+	.vfront_porch = { 10, 10, 10 },
+	.vback_porch = { 35 - 2, 35 - 2, 35 - 2 },
+	.vsync_len = { 2, 2, 2 },
+	.flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_NEGEDGE |
+		DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc urt_umsh_8596md_lvds = {
+	.timings = &urt_umsh_8596md_timing,
+	.num_timings = 1,
+	.bpc = 6,
+	.size = {
+		.width = 152,
+		.height = 91,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
+static const struct panel_desc urt_umsh_8596md_parallel = {
+	.timings = &urt_umsh_8596md_timing,
+	.num_timings = 1,
+	.bpc = 6,
+	.size = {
+		.width = 152,
+		.height = 91,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
 static const struct of_device_id platform_of_match[] = {
 	{
 		.compatible = "ampire,am800480r3tmqwa1h",
@@ -1256,6 +1317,9 @@
 		.compatible = "lg,lb070wv8",
 		.data = &lg_lb070wv8,
 	}, {
+		.compatible = "lg,lp120up1",
+		.data = &lg_lp120up1,
+	}, {
 		.compatible = "lg,lp129qe",
 		.data = &lg_lp129qe,
 	}, {
@@ -1280,6 +1344,24 @@
 		.compatible = "shelly,sca07010-bfn-lnn",
 		.data = &shelly_sca07010_bfn_lnn,
 	}, {
+		.compatible = "urt,umsh-8596md-t",
+		.data = &urt_umsh_8596md_parallel,
+	}, {
+		.compatible = "urt,umsh-8596md-1t",
+		.data = &urt_umsh_8596md_parallel,
+	}, {
+		.compatible = "urt,umsh-8596md-7t",
+		.data = &urt_umsh_8596md_parallel,
+	}, {
+		.compatible = "urt,umsh-8596md-11t",
+		.data = &urt_umsh_8596md_lvds,
+	}, {
+		.compatible = "urt,umsh-8596md-19t",
+		.data = &urt_umsh_8596md_lvds,
+	}, {
+		.compatible = "urt,umsh-8596md-20t",
+		.data = &urt_umsh_8596md_parallel,
+	}, {
 		/* sentinel */
 	}
 };
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8627651..43e5f50 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -734,14 +734,6 @@
 	DRM_DEBUG("\n");
 }
 
-static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
-			       const struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
-{
-	DRM_DEBUG("\n");
-	return true;
-}
-
 static void qxl_enc_prepare(struct drm_encoder *encoder)
 {
 	DRM_DEBUG("\n");
@@ -864,7 +856,6 @@
 
 static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
 	.dpms = qxl_enc_dpms,
-	.mode_fixup = qxl_enc_mode_fixup,
 	.prepare = qxl_enc_prepare,
 	.mode_set = qxl_enc_mode_set,
 	.commit = qxl_enc_commit,
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ec1593a..f66c33d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -66,9 +66,10 @@
 static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
 
-static uint32_t atom_arg_mask[8] =
-    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
-0xFF000000 };
+static uint32_t atom_arg_mask[8] = {
+	0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
+	0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+};
 static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
 
 static int atom_dst_to_src[8][4] = {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 801dd60..cf61e08 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -275,13 +275,13 @@
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
 		atombios_blank_crtc(crtc, ATOM_DISABLE);
-		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		drm_vblank_on(dev, radeon_crtc->crtc_id);
 		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		drm_vblank_off(dev, radeon_crtc->crtc_id);
 		if (radeon_crtc->enabled)
 			atombios_blank_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -1665,11 +1665,11 @@
 }
 
 int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
-                                  struct drm_framebuffer *fb,
+				  struct drm_framebuffer *fb,
 				  int x, int y, enum mode_set_atomic state)
 {
-       struct drm_device *dev = crtc->dev;
-       struct radeon_device *rdev = dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
 		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6af8325..afa9db1 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -37,10 +37,10 @@
 #define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
 
 static char *voltage_names[] = {
-        "0.4V", "0.6V", "0.8V", "1.2V"
+	"0.4V", "0.6V", "0.8V", "1.2V"
 };
 static char *pre_emph_names[] = {
-        "0dB", "3.5dB", "6dB", "9.5dB"
+	"0dB", "3.5dB", "6dB", "9.5dB"
 };
 
 /***** radeon AUX functions *****/
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 01b20e1..edd05cd 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -892,8 +892,6 @@
 			else
 				args.v1.ucLaneNum = 4;
 
-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
-				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
 			switch (radeon_encoder->encoder_id) {
 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -910,6 +908,10 @@
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
 			else
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
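+			/*
+			 * OR in the DP link rate only after ucConfig has been
+			 * assigned by the transmitter switch above, which
+			 * would otherwise overwrite it.
+			 */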
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
 			break;
 		case 2:
 		case 3:
@@ -2623,16 +2625,8 @@
 
 }
 
-static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
-				       const struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
 	.dpms = radeon_atom_ext_dpms,
-	.mode_fixup = radeon_atom_ext_mode_fixup,
 	.prepare = radeon_atom_ext_prepare,
 	.mode_set = radeon_atom_ext_mode_set,
 	.commit = radeon_atom_ext_commit,
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 69556f5..38e5123 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1163,12 +1163,11 @@
 	155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000
 };
 
-static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
-{
-        { 10000, 30000, RADEON_SCLK_UP },
-        { 15000, 30000, RADEON_SCLK_UP },
-        { 20000, 30000, RADEON_SCLK_UP },
-        { 25000, 30000, RADEON_SCLK_UP }
+static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = {
+	{ 10000, 30000, RADEON_SCLK_UP },
+	{ 15000, 30000, RADEON_SCLK_UP },
+	{ 20000, 30000, RADEON_SCLK_UP },
+	{ 25000, 30000, RADEON_SCLK_UP }
 };
 
 void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
@@ -1637,14 +1636,14 @@
 	cypress_populate_smc_voltage_tables(rdev, table);
 
 	switch (rdev->pm.int_thermal_type) {
-        case THERMAL_TYPE_EVERGREEN:
-        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
 		break;
-        case THERMAL_TYPE_NONE:
+	case THERMAL_TYPE_NONE:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
 		break;
-        default:
+	default:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
 		break;
 	}
@@ -1860,37 +1859,37 @@
 	case MC_SEQ_RAS_TIMING >> 2:
 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_CAS_TIMING >> 2:
+	case MC_SEQ_CAS_TIMING >> 2:
 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_MISC_TIMING >> 2:
+	case MC_SEQ_MISC_TIMING >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_MISC_TIMING2 >> 2:
+	case MC_SEQ_MISC_TIMING2 >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
 		break;
-        case MC_SEQ_RD_CTL_D0 >> 2:
+	case MC_SEQ_RD_CTL_D0 >> 2:
 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
 		break;
-        case MC_SEQ_RD_CTL_D1 >> 2:
+	case MC_SEQ_RD_CTL_D1 >> 2:
 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_D0 >> 2:
+	case MC_SEQ_WR_CTL_D0 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_D1 >> 2:
+	case MC_SEQ_WR_CTL_D1 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
 		break;
-        case MC_PMG_CMD_EMRS >> 2:
+	case MC_PMG_CMD_EMRS >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS >> 2:
+	case MC_PMG_CMD_MRS >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS1 >> 2:
+	case MC_PMG_CMD_MRS1 >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
 		break;
-        default:
+	default:
 		result = false;
 		break;
 	}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 4a09947..35e0fc3 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -192,9 +192,9 @@
 
 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
 {
-        struct ci_power_info *pi = rdev->pm.dpm.priv;
+	struct ci_power_info *pi = rdev->pm.dpm.priv;
 
-        return pi;
+	return pi;
 }
 
 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
@@ -1632,7 +1632,7 @@
 	else
 		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
 
-        ci_set_power_limit(rdev, power_limit);
+	ci_set_power_limit(rdev, power_limit);
 
 	if (pi->caps_automatic_dc_transition) {
 		if (ac_power)
@@ -2017,9 +2017,9 @@
 {
 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
 
-        tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
-        tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
-                DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
+	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
+	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
 
 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
 }
@@ -2938,8 +2938,8 @@
 
 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
-        memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
-        memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
 
 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
@@ -3152,7 +3152,7 @@
 
 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
-        spll_func_cntl_3 |= SPLL_DITHEN;
+	spll_func_cntl_3 |= SPLL_DITHEN;
 
 	if (pi->caps_sclk_ss_support) {
 		struct radeon_atom_ss ss;
@@ -3229,7 +3229,7 @@
 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
-        graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
@@ -4393,7 +4393,7 @@
 		break;
 	case MC_SEQ_CAS_TIMING >> 2:
 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
-            break;
+		break;
 	case MC_SEQ_MISC_TIMING >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
 		break;
@@ -4625,7 +4625,7 @@
 	if (ret)
 		goto init_mc_done;
 
-        ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
 	if (ret)
 		goto init_mc_done;
 
@@ -4916,7 +4916,7 @@
 		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
-        rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
 
 	return 0;
@@ -5517,7 +5517,7 @@
 	struct _NonClockInfoArray *non_clock_info_array;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 	struct ci_ps *ps;
@@ -5693,8 +5693,8 @@
 		return ret;
 	}
 
-        pi->dll_default_on = false;
-        pi->sram_end = SMC_RAM_END;
+	pi->dll_default_on = false;
+	pi->sram_end = SMC_RAM_END;
 
 	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
 	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
@@ -5734,9 +5734,9 @@
 	pi->caps_uvd_dpm = true;
 	pi->caps_vce_dpm = true;
 
-        ci_get_leakage_voltages(rdev);
-        ci_patch_dependency_tables_with_leakage(rdev);
-        ci_set_private_data_variables_based_on_pptable(rdev);
+	ci_get_leakage_voltages(rdev);
+	ci_patch_dependency_tables_with_leakage(rdev);
+	ci_set_private_data_variables_based_on_pptable(rdev);
 
 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
 		kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
@@ -5839,7 +5839,7 @@
 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
 		else
 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
-        }
+	}
 
 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
@@ -5860,7 +5860,7 @@
 #endif
 
 	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset)) {
+				   &frev, &crev, &data_offset)) {
 		pi->caps_sclk_ss_support = true;
 		pi->caps_mclk_ss_support = true;
 		pi->dynamic_ss = true;
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 35c6f648..24760ee 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -194,11 +194,11 @@
 		return PPSMC_Result_OK;
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
-                tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
-                if ((tmp & CKEN) == 0)
+		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+		if ((tmp & CKEN) == 0)
 			break;
-                udelay(1);
-        }
+		udelay(1);
+	}
 
 	return PPSMC_Result_OK;
 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4c30d8c..8ac82df 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1712,7 +1712,7 @@
  */
 u32 cik_get_xclk(struct radeon_device *rdev)
 {
-        u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 reference_clock = rdev->clock.spll.reference_freq;
 
 	if (rdev->flags & RADEON_IS_IGP) {
 		if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
@@ -2343,9 +2343,13 @@
  */
 static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 {
-	const u32 num_tile_mode_states = 32;
-	const u32 num_secondary_tile_mode_states = 16;
-	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+	u32 *tile = rdev->config.cik.tile_mode_array;
+	u32 *macrotile = rdev->config.cik.macrotile_mode_array;
+	const u32 num_tile_mode_states =
+			ARRAY_SIZE(rdev->config.cik.tile_mode_array);
+	const u32 num_secondary_tile_mode_states =
+			ARRAY_SIZE(rdev->config.cik.macrotile_mode_array);
+	u32 reg_offset, split_equal_to_row_size;
 	u32 num_pipe_configs;
 	u32 num_rbs = rdev->config.cik.max_backends_per_se *
 		rdev->config.cik.max_shader_engines;
@@ -2367,1032 +2371,669 @@
 	if (num_pipe_configs > 8)
 		num_pipe_configs = 16;
 
-	if (num_pipe_configs == 16) {
-		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
-				break;
-			case 1:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
-				break;
-			case 2:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-				break;
-			case 3:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
-				break;
-			case 4:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 5:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 6:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-				break;
-			case 7:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 8:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
-				break;
-			case 9:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
-				break;
-			case 10:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 11:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 12:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 13:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-				break;
-			case 14:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 16:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 17:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 27:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
-				break;
-			case 28:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 29:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 30:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 1:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 2:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 3:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 4:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 5:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			case 6:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_2_BANK));
-				break;
-			case 8:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 9:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 10:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 11:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 12:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			case 13:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_2_BANK));
-				break;
-			case 14:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_2_BANK));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-	} else if (num_pipe_configs == 8) {
-		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
-				break;
-			case 1:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
-				break;
-			case 2:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-				break;
-			case 3:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
-				break;
-			case 4:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 5:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 6:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-				break;
-			case 7:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 8:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
-				break;
-			case 9:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
-				break;
-			case 10:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 11:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 12:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 13:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-				break;
-			case 14:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 16:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 17:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 27:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
-				break;
-			case 28:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 29:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 30:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 1:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 2:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 3:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 4:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 5:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			case 6:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_2_BANK));
-				break;
-			case 8:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 9:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 10:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 11:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 12:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 13:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			case 14:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_2_BANK));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-	} else if (num_pipe_configs == 4) {
+	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+		tile[reg_offset] = 0;
+	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+		macrotile[reg_offset] = 0;
+
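The two clearing loops above take over the job of the old "default: gb_tile_moden = 0;" arms: every table entry the per-pipe-config blocks below leave unassigned (e.g. tile indices 15 and 18-26) simply stays zero. A minimal sketch of the same zero-then-fill-sparsely idiom, using a made-up table and values rather than the real register fields:

	/* Sketch only: default-to-zero lookup table, filled sparsely. */
	#include <stdint.h>
	#include <string.h>

	#define NUM_MODES 32

	static void init_modes(uint32_t *table)
	{
		/* Clearing up front replaces a per-index "default: 0" switch arm. */
		memset(table, 0, NUM_MODES * sizeof(*table));
		table[0] = 0x1;		/* only meaningful entries are written... */
		table[14] = 0x2;	/* ...the gaps (e.g. 15, 18-26) stay 0 */
	}
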
+	switch (num_pipe_configs) {
+	case 16:
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_2_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_2_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_2_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+		break;
+
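Each case now ends by streaming the finished tables into the chip in two tight loops; the "reg_offset * 4" stride works because the GB_TILE_MODE and GB_MACROTILE_MODE registers sit at consecutive 32-bit offsets. A minimal sketch of that write pattern, with a hypothetical write_reg() standing in for WREG32() and a logging stub instead of a real MMIO access:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for WREG32(); real code writes the GPU's MMIO space. */
	static void write_reg(uint32_t offset, uint32_t value)
	{
		printf("reg[0x%04x] = 0x%08x\n", offset, value);
	}

	/* Program a bank of consecutive 32-bit registers from a table. */
	static void flush_table(uint32_t base, const uint32_t *table, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			write_reg(base + i * 4, table[i]);	/* 4 bytes per register */
	}
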
+	case 8:
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_2_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_2_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+		break;
+
+	case 4:
 		if (num_rbs == 4) {
-			for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-				switch (reg_offset) {
-				case 0:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
-					break;
-				case 1:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
-					break;
-				case 2:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-					break;
-				case 3:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
-					break;
-				case 4:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 TILE_SPLIT(split_equal_to_row_size));
-					break;
-				case 5:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-					break;
-				case 6:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-					break;
-				case 7:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 TILE_SPLIT(split_equal_to_row_size));
-					break;
-				case 8:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16));
-					break;
-				case 9:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
-					break;
-				case 10:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 11:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 12:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 13:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-					break;
-				case 14:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 16:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 17:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 27:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
-					break;
-				case 28:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 29:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 30:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				default:
-					gb_tile_moden = 0;
-					break;
-				}
-				rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
-				WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-			}
+			tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+			tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+			tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+			tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+			tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   TILE_SPLIT(split_equal_to_row_size));
+			tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+			tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+			tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   TILE_SPLIT(split_equal_to_row_size));
+			tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16));
+			tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+			tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+			tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+			tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
 		} else if (num_rbs < 4) {
-			for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-				switch (reg_offset) {
-				case 0:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
-					break;
-				case 1:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
-					break;
-				case 2:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-					break;
-				case 3:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
-					break;
-				case 4:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 TILE_SPLIT(split_equal_to_row_size));
-					break;
-				case 5:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-					break;
-				case 6:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-					break;
-				case 7:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 TILE_SPLIT(split_equal_to_row_size));
-					break;
-				case 8:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16));
-					break;
-				case 9:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
-					break;
-				case 10:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 11:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 12:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 13:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-					break;
-				case 14:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 16:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 17:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 27:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
-					break;
-				case 28:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 29:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				case 30:
-					gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-							 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-							 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-							 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-					break;
-				default:
-					gb_tile_moden = 0;
-					break;
-				}
-				rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
-				WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-			}
+			tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+			tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+			tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+			tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+			tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   TILE_SPLIT(split_equal_to_row_size));
+			tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+			tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+			tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   TILE_SPLIT(split_equal_to_row_size));
+			tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16));
+			tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+			tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+			tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+			tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+			tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+				    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+				    PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+				    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
 		}
-		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 1:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 2:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 3:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 4:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 5:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 6:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			case 8:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 9:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 10:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 11:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 12:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 13:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 14:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
-						 NUM_BANKS(ADDR_SURF_4_BANK));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-	} else if (num_pipe_configs == 2) {
-		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
-				break;
-			case 1:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
-				break;
-			case 2:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-				break;
-			case 3:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
-				break;
-			case 4:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 5:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
-				break;
-			case 6:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
-				break;
-			case 7:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 TILE_SPLIT(split_equal_to_row_size));
-				break;
-			case 8:
-				gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						PIPE_CONFIG(ADDR_SURF_P2);
-				break;
-			case 9:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2));
-				break;
-			case 10:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 11:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 12:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 13:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
-				break;
-			case 14:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 16:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 17:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 27:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2));
-				break;
-			case 28:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 29:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			case 30:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P2) |
-						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 1:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 2:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 3:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 4:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 5:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 6:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			case 8:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 9:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 10:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 11:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 12:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 13:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
-						 NUM_BANKS(ADDR_SURF_16_BANK));
-				break;
-			case 14:
-				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
-						 NUM_BANKS(ADDR_SURF_8_BANK));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-	} else
+
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+				NUM_BANKS(ADDR_SURF_4_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+		break;
+
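Throughout this table setup, each 32-bit value is composed by OR-ing together field macros (ARRAY_MODE(), PIPE_CONFIG(), BANK_WIDTH(), NUM_BANKS(), ...), each of which shifts an ADDR_SURF_* code into that field's bit position; the real shift and mask definitions come from the radeon register headers. A small sketch of the idiom, with invented bit positions:

	#include <stdint.h>

	/* Invented field positions for illustration; the real ones are hardware-defined. */
	#define BANK_WIDTH(x)		((x) << 0)
	#define BANK_HEIGHT(x)		((x) << 2)
	#define MACRO_TILE_ASPECT(x)	((x) << 4)
	#define NUM_BANKS(x)		((x) << 6)

	static uint32_t macrotile_mode(uint32_t w, uint32_t h, uint32_t aspect, uint32_t banks)
	{
		/* Fields occupy disjoint bit ranges, so OR composes them without overlap. */
		return BANK_WIDTH(w) | BANK_HEIGHT(h) |
		       MACRO_TILE_ASPECT(aspect) | NUM_BANKS(banks);
	}
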
+	case 2:
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+		tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+		tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2) |
+			   TILE_SPLIT(split_equal_to_row_size));
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   PIPE_CONFIG(ADDR_SURF_P2));
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P2));
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2));
+		tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+		tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+			    PIPE_CONFIG(ADDR_SURF_P2) |
+			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+				NUM_BANKS(ADDR_SURF_16_BANK));
+		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+				NUM_BANKS(ADDR_SURF_8_BANK));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+		break;
+
+	default:
 		DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
+	}
 }
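
The register loops that close each case above address GB_TILE_MODE0 and GB_MACROTILE_MODE0 as banks of consecutive 32-bit registers, so table slot N is written at base + N * 4; indices never assigned in a given case (7, 15, 18-26 in the tile table) presumably stay zero from the array initialization earlier in the function. A minimal sketch of that addressing, with a hypothetical helper name:

	/* Hypothetical helper: program one slot of the tile-mode bank.
	 * Slot N sits N * 4 bytes past GB_TILE_MODE0, hence the offset. */
	static void cik_write_tile_mode(struct radeon_device *rdev,
					unsigned int index, u32 value)
	{
		WREG32(GB_TILE_MODE0 + (index * 4), value);
	}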
 
 /**
@@ -4219,13 +3860,20 @@
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		return r;
 	}
-	r = radeon_fence_wait(ib.fence, false);
-	if (r) {
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
 		return r;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		radeon_scratch_free(rdev, scratch);
+		radeon_ib_free(rdev, &ib);
+		return -ETIMEDOUT;
 	}
+	r = 0;
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF)
@@ -9702,13 +9350,13 @@
 	mutex_lock(&rdev->gpu_clock_mutex);
 	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
 	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
-	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
 	mutex_unlock(&rdev->gpu_clock_mutex);
 	return clock;
 }
 
 static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
-                              u32 cntl_reg, u32 status_reg)
+			     u32 cntl_reg, u32 status_reg)
 {
 	int r, i;
 	struct atom_clock_dividers dividers;
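
The radeon_fence_wait_timeout() conversions running through these IB tests all follow one return convention: a negative value means the wait itself failed, zero means the one-second RADEON_USEC_IB_TEST_TIMEOUT expired, and a positive value is the time left in jiffies. A hedged sketch of a caller normalizing that back to an int error code (helper name hypothetical):

	static int ib_test_wait(struct radeon_ib *ib)
	{
		long r = radeon_fence_wait_timeout(ib->fence, false,
				usecs_to_jiffies(RADEON_USEC_IB_TEST_TIMEOUT));

		if (r < 0)
			return r;		/* wait failed, e.g. on GPU lockup */
		if (r == 0)
			return -ETIMEDOUT;	/* fence never signaled */
		return 0;			/* signaled with r jiffies to spare */
	}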
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d16f2ee..9c351dc 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -737,11 +737,16 @@
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		return r;
 	}
-	r = radeon_fence_wait(ib.fence, false);
-	if (r) {
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		return r;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		return -ETIMEDOUT;
 	}
+	r = 0;
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
 		if (tmp == 0xDEADBEEF)
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index ca05858..a4edd07 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1620,14 +1620,14 @@
 	cypress_populate_smc_voltage_tables(rdev, table);
 
 	switch (rdev->pm.int_thermal_type) {
-        case THERMAL_TYPE_EVERGREEN:
-        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
 		break;
-        case THERMAL_TYPE_NONE:
+	case THERMAL_TYPE_NONE:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
 		break;
-        default:
+	default:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
 		break;
 	}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 2ad4628..76c4bdf 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1140,7 +1140,7 @@
 	int r, i;
 	struct atom_clock_dividers dividers;
 
-        r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
 					   clock, false, &dividers);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 46f87d4..9e93205 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1816,8 +1816,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         (idx_value & 0xfffffff0) +
-		         ((u64)(tmp & 0xff) << 32);
+			 (idx_value & 0xfffffff0) +
+			 ((u64)(tmp & 0xff) << 32);
 
 		ib[idx + 0] = offset;
 		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -1862,8 +1862,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         idx_value +
-		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+			 idx_value +
+			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
 		ib[idx+0] = offset;
 		ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1897,8 +1897,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         idx_value +
-		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+			 idx_value +
+			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
 		ib[idx+0] = offset;
 		ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1925,8 +1925,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         radeon_get_ib_value(p, idx+1) +
-		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+			 radeon_get_ib_value(p, idx+1) +
+			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
 		ib[idx+1] = offset;
 		ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2098,8 +2098,8 @@
 			}
 
 			offset = reloc->gpu_offset +
-			         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
-			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+				 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2239,8 +2239,8 @@
 				return -EINVAL;
 			}
 			offset = reloc->gpu_offset +
-			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
-			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
 			ib[idx+1] = offset & 0xfffffff8;
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2261,8 +2261,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
-		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
 		ib[idx+1] = offset & 0xfffffffc;
 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -2283,8 +2283,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
-		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
 		ib[idx+1] = offset & 0xfffffffc;
 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 3cf04a2..f766c96 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -206,7 +206,7 @@
  * build an AVI Info Frame
  */
 void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
-    unsigned char *buffer, size_t size)
+			      unsigned char *buffer, size_t size)
 {
 	uint8_t *frame = buffer + 3;
 
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 2d71da4..d024074 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2640,7 +2640,7 @@
 	struct _NonClockInfoArray *non_clock_info_array;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 	struct kv_ps *ps;
@@ -2738,7 +2738,7 @@
 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
 		pi->at[i] = TRINITY_AT_DFLT;
 
-        pi->sram_end = SMC_RAM_END;
+	pi->sram_end = SMC_RAM_END;
 
 	/* Enabling nb dpm on an asrock system prevents dpm from working */
 	if (rdev->pdev->subsystem_vendor == 0x1849)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 158872e..b88d63c9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1257,7 +1257,7 @@
 		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
 		tmp &= ~0x00380000;
 		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
-                tmp = RREG32_CG(CG_CGTT_LOCAL_1);
+		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
 		tmp &= ~0x0e000000;
 		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
 	}
@@ -2634,7 +2634,7 @@
 	struct atom_clock_dividers dividers;
 	int r, i;
 
-        r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
 					   ecclk, false, &dividers);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c3d531a..4a601f9 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -725,9 +725,9 @@
 
 struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
 {
-        struct ni_power_info *pi = rdev->pm.dpm.priv;
+	struct ni_power_info *pi = rdev->pm.dpm.priv;
 
-        return pi;
+	return pi;
 }
 
 struct ni_ps *ni_get_ps(struct radeon_ps *rps)
@@ -1096,9 +1096,9 @@
 
 static int ni_process_firmware_header(struct radeon_device *rdev)
 {
-        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
-        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-        struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	u32 tmp;
 	int ret;
 
@@ -1202,14 +1202,14 @@
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 
 	if (pi->gfx_clock_gating) {
-                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
 		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
-                WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
 		RREG32(GB_ADDR_CONFIG);
-        }
+	}
 
 	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
-                 ~HOST_SMC_MSG_MASK);
+		 ~HOST_SMC_MSG_MASK);
 
 	udelay(25000);
 
@@ -1321,12 +1321,12 @@
 				   u32 mclk,
 				   NISLANDS_SMC_VOLTAGE_VALUE *voltage)
 {
-        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 
 	if (!pi->mvdd_control) {
 		voltage->index = eg_pi->mvdd_high_index;
-                voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
 		return;
 	}
 
@@ -1510,47 +1510,47 @@
 	u32 mc_cg_config;
 
 	switch (arb_freq_src) {
-        case MC_CG_ARB_FREQ_F0:
+	case MC_CG_ARB_FREQ_F0:
 		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
 		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
 		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
 		break;
-        case MC_CG_ARB_FREQ_F1:
+	case MC_CG_ARB_FREQ_F1:
 		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
 		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
 		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
 		break;
-        case MC_CG_ARB_FREQ_F2:
+	case MC_CG_ARB_FREQ_F2:
 		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
 		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
 		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
 		break;
-        case MC_CG_ARB_FREQ_F3:
+	case MC_CG_ARB_FREQ_F3:
 		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
 		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
 		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
 		break;
-        default:
+	default:
 		return -EINVAL;
 	}
 
 	switch (arb_freq_dest) {
-        case MC_CG_ARB_FREQ_F0:
+	case MC_CG_ARB_FREQ_F0:
 		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
 		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
 		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
 		break;
-        case MC_CG_ARB_FREQ_F1:
+	case MC_CG_ARB_FREQ_F1:
 		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
 		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
 		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
 		break;
-        case MC_CG_ARB_FREQ_F2:
+	case MC_CG_ARB_FREQ_F2:
 		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
 		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
 		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
 		break;
-        case MC_CG_ARB_FREQ_F3:
+	case MC_CG_ARB_FREQ_F3:
 		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
 		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
 		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
@@ -1621,9 +1621,7 @@
 		(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);
 
 
-	radeon_atom_set_engine_dram_timings(rdev,
-                                            pl->sclk,
-                                            pl->mclk);
+	radeon_atom_set_engine_dram_timings(rdev, pl->sclk, pl->mclk);
 
 	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -1867,9 +1865,9 @@
 
 	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
 
-        if (pi->mem_gddr5)
-                mpll_dq_func_cntl &= ~PDNB;
-        mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
+	if (pi->mem_gddr5)
+		mpll_dq_func_cntl &= ~PDNB;
+	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
 
 
 	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
@@ -1891,15 +1889,15 @@
 			      MRDCKD1_PDNB);
 
 	dll_cntl |= (MRDCKA0_BYPASS |
-                     MRDCKA1_BYPASS |
-                     MRDCKB0_BYPASS |
-                     MRDCKB1_BYPASS |
-                     MRDCKC0_BYPASS |
-                     MRDCKC1_BYPASS |
-                     MRDCKD0_BYPASS |
-                     MRDCKD1_BYPASS);
+		     MRDCKA1_BYPASS |
+		     MRDCKB0_BYPASS |
+		     MRDCKB1_BYPASS |
+		     MRDCKC0_BYPASS |
+		     MRDCKC1_BYPASS |
+		     MRDCKD0_BYPASS |
+		     MRDCKD1_BYPASS);
 
-        spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
 
 	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
@@ -2089,7 +2087,7 @@
 
 static int ni_init_smc_spll_table(struct radeon_device *rdev)
 {
-        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
 	NISLANDS_SMC_SCLK_VALUE sclk_params;
@@ -2311,8 +2309,8 @@
 					 NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
 {
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
-        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-        struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	int ret;
 	bool dll_state_on;
 	u16 std_vddc;
@@ -2391,8 +2389,8 @@
 			     struct radeon_ps *radeon_state,
 			     NISLANDS_SMC_SWSTATE *smc_state)
 {
-        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
-        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 	struct ni_ps *state = ni_get_ps(radeon_state);
 	u32 a_t;
 	u32 t_l, t_h;
@@ -2451,8 +2449,8 @@
 						struct radeon_ps *radeon_state,
 						NISLANDS_SMC_SWSTATE *smc_state)
 {
-        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
-        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	struct ni_ps *state = ni_get_ps(radeon_state);
 	u32 prev_sclk;
@@ -2595,7 +2593,7 @@
 				       struct radeon_ps *radeon_new_state,
 				       bool enable)
 {
-        struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	PPSMC_Result smc_result;
 	int ret = 0;
 
@@ -2625,7 +2623,7 @@
 					 struct radeon_ps *radeon_state,
 					 NISLANDS_SMC_SWSTATE *smc_state)
 {
-        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	struct ni_ps *state = ni_get_ps(radeon_state);
 	int i, ret;
@@ -2770,46 +2768,46 @@
 	bool result = true;
 
 	switch (in_reg) {
-        case  MC_SEQ_RAS_TIMING >> 2:
+	case  MC_SEQ_RAS_TIMING >> 2:
 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_CAS_TIMING >> 2:
+	case MC_SEQ_CAS_TIMING >> 2:
 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_MISC_TIMING >> 2:
+	case MC_SEQ_MISC_TIMING >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_MISC_TIMING2 >> 2:
+	case MC_SEQ_MISC_TIMING2 >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
 		break;
-        case MC_SEQ_RD_CTL_D0 >> 2:
+	case MC_SEQ_RD_CTL_D0 >> 2:
 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
 		break;
-        case MC_SEQ_RD_CTL_D1 >> 2:
+	case MC_SEQ_RD_CTL_D1 >> 2:
 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_D0 >> 2:
+	case MC_SEQ_WR_CTL_D0 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_D1 >> 2:
+	case MC_SEQ_WR_CTL_D1 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
 		break;
-        case MC_PMG_CMD_EMRS >> 2:
+	case MC_PMG_CMD_EMRS >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS >> 2:
+	case MC_PMG_CMD_MRS >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS1 >> 2:
+	case MC_PMG_CMD_MRS1 >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
 		break;
-        case MC_SEQ_PMG_TIMING >> 2:
+	case MC_SEQ_PMG_TIMING >> 2:
 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS2 >> 2:
+	case MC_PMG_CMD_MRS2 >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
 		break;
-        default:
+	default:
 		result = false;
 		break;
 	}
@@ -2876,9 +2874,9 @@
 	struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
 	u8 module_index = rv770_get_memory_module_index(rdev);
 
-        table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
-        if (!table)
-                return -ENOMEM;
+	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
 
 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
@@ -2896,25 +2894,25 @@
 
 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
 
-        if (ret)
-                goto init_mc_done;
+	if (ret)
+		goto init_mc_done;
 
 	ret = ni_copy_vbios_mc_reg_table(table, ni_table);
 
-        if (ret)
-                goto init_mc_done;
+	if (ret)
+		goto init_mc_done;
 
 	ni_set_s0_mc_reg_index(ni_table);
 
 	ret = ni_set_mc_special_registers(rdev, ni_table);
 
-        if (ret)
-                goto init_mc_done;
+	if (ret)
+		goto init_mc_done;
 
 	ni_set_valid_flag(ni_table);
 
 init_mc_done:
-        kfree(table);
+	kfree(table);
 
 	return ret;
 }
@@ -2994,7 +2992,7 @@
 {
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-        struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
 	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
 
@@ -3025,7 +3023,7 @@
 {
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-        struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
 	SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
 	u16 address;
@@ -3142,7 +3140,7 @@
 	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 	PP_NIslands_CACTABLES *cac_tables = NULL;
 	int i, ret;
-        u32 reg;
+	u32 reg;
 
 	if (ni_pi->enable_cac == false)
 		return 0;
@@ -3422,13 +3420,13 @@
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 
 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
-            (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
+	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
 		if (eg_pi->pcie_performance_request_registered == false)
 			radeon_acpi_pcie_notify_device_ready(rdev);
 		eg_pi->pcie_performance_request_registered = true;
 		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
 	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
-                   eg_pi->pcie_performance_request_registered) {
+		    eg_pi->pcie_performance_request_registered) {
 		eg_pi->pcie_performance_request_registered = false;
 		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
 	}
@@ -3441,12 +3439,12 @@
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	u32 tmp;
 
-        tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 
-        if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
-            (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
-                pi->pcie_gen2 = true;
-        else
+	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
+		pi->pcie_gen2 = true;
+	else
 		pi->pcie_gen2 = false;
 
 	if (!pi->pcie_gen2)
@@ -3458,8 +3456,8 @@
 static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
 					    bool enable)
 {
-        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
-        u32 tmp, bif;
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	u32 tmp, bif;
 
 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 
@@ -3502,7 +3500,7 @@
 	if (enable)
 		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
 	else
-                WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
+		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
 }
 
 void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
@@ -3563,7 +3561,7 @@
 {
 	struct ni_ps *new_ps = ni_get_ps(rps);
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-        struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 
 	eg_pi->current_rps = *rps;
 	ni_pi->current_ps = *new_ps;
@@ -3575,7 +3573,7 @@
 {
 	struct ni_ps *new_ps = ni_get_ps(rps);
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
-        struct ni_power_info *ni_pi = ni_get_pi(rdev);
+	struct ni_power_info *ni_pi = ni_get_pi(rdev);
 
 	eg_pi->requested_rps = *rps;
 	ni_pi->requested_ps = *new_ps;
@@ -3591,8 +3589,8 @@
 
 	if (pi->gfx_clock_gating)
 		ni_cg_clockgating_default(rdev);
-        if (btc_dpm_enabled(rdev))
-                return -EINVAL;
+	if (btc_dpm_enabled(rdev))
+		return -EINVAL;
 	if (pi->mg_clock_gating)
 		ni_mg_clockgating_default(rdev);
 	if (eg_pi->ls_clock_gating)
@@ -3991,7 +3989,7 @@
 	union pplib_clock_info *clock_info;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	struct ni_ps *ps;
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5eae0a8..6e478a2 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3732,11 +3732,17 @@
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		goto free_ib;
 	}
-	r = radeon_fence_wait(ib.fence, false);
-	if (r) {
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		goto free_ib;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		r = -ETIMEDOUT;
+		goto free_ib;
 	}
+	r = 0;
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cc2fdf0..f86ab69 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -235,8 +235,8 @@
 		fb_div |= 1;
 
 	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
-        if (r)
-                return r;
+	if (r)
+		return r;
 
 	/* assert PLL_RESET */
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
@@ -1490,7 +1490,7 @@
 					rdev->fastfb_working = true;
 				}
 			}
-  		}
+		}
 	}
 
 	radeon_update_bandwidth_info(rdev);
@@ -3381,11 +3381,17 @@
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		goto free_ib;
 	}
-	r = radeon_fence_wait(ib.fence, false);
-	if (r) {
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		goto free_ib;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		r = -ETIMEDOUT;
+		goto free_ib;
 	}
+	r = 0;
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF)
@@ -4568,7 +4574,7 @@
 	mutex_lock(&rdev->gpu_clock_mutex);
 	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
 	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
-	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
 	mutex_unlock(&rdev->gpu_clock_mutex);
 	return clock;
 }
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 2f36fa15..b69c8de 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1671,8 +1671,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         (idx_value & 0xfffffff0) +
-		         ((u64)(tmp & 0xff) << 32);
+			 (idx_value & 0xfffffff0) +
+			 ((u64)(tmp & 0xff) << 32);
 
 		ib[idx + 0] = offset;
 		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -1712,8 +1712,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         idx_value +
-		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+			 idx_value +
+			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
 
 		ib[idx+0] = offset;
 		ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1764,8 +1764,8 @@
 			}
 
 			offset = reloc->gpu_offset +
-			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
-			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
+				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -1876,8 +1876,8 @@
 				return -EINVAL;
 			}
 			offset = reloc->gpu_offset +
-			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
-			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
 			ib[idx+1] = offset & 0xfffffff8;
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -1898,8 +1898,8 @@
 		}
 
 		offset = reloc->gpu_offset +
-		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
-		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
 
 		ib[idx+1] = offset & 0xfffffffc;
 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index d2dd29a..fb65e6f 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -368,11 +368,16 @@
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		return r;
 	}
-	r = radeon_fence_wait(ib.fence, false);
-	if (r) {
+	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		return r;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		return -ETIMEDOUT;
 	}
+	r = 0;
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
 		if (tmp == 0xDEADBEEF)
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa21544..6a4b020 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -844,7 +844,7 @@
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 
 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -874,7 +874,7 @@
 	union fan_info *fan_info;
 	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	int ret, i;
 
@@ -1070,7 +1070,7 @@
 			ext_hdr->usVCETableOffset) {
 			VCEClockInfoArray *array = (VCEClockInfoArray *)
 				(mode_info->atom_context->bios + data_offset +
-                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
 			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
 				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
 				(mode_info->atom_context->bios + data_offset +
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e85894a..e82a99c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -215,7 +215,7 @@
  * build an HDMI Video Info Frame
  */
 void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
-    unsigned char *buffer, size_t size)
+			 unsigned char *buffer, size_t size)
 {
 	uint8_t *frame = buffer + 3;
 
@@ -312,7 +312,7 @@
 }
 
 void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
-    struct radeon_crtc *crtc, unsigned int clock)
+			     struct radeon_crtc *crtc, unsigned int clock)
 {
 	struct radeon_encoder *radeon_encoder;
 	struct radeon_encoder_atom_dig *dig;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 78a51b3..007be29 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -120,6 +120,7 @@
  */
 #define RADEON_MAX_USEC_TIMEOUT			100000	/* 100 ms */
 #define RADEON_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
+#define RADEON_USEC_IB_TEST_TIMEOUT		1000000 /* 1s */
 /* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE			16
 #define RADEON_DEBUGFS_MAX_COMPONENTS		32
@@ -382,6 +383,7 @@
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
+long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, long timeout);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
index be4af76..cd872f7 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.h
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -291,6 +291,8 @@
 #       define ATPX_FIXED_NOT_SUPPORTED                            (1 << 9)
 #       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED               (1 << 10)
 #       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS                    (1 << 11)
+#       define ATPX_DGPU_CAN_DRIVE_DISPLAYS                        (1 << 12)
+#       define ATPX_MS_HYBRID_GFX_SUPPORTED                        (1 << 14)
 #define ATPX_FUNCTION_POWER_CONTROL                                0x2
 /* ARG0: ATPX_FUNCTION_POWER_CONTROL
  * ARG1:
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index de9a2ff..f8097a0 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2095,7 +2095,7 @@
 	struct radeon_i2c_bus_rec i2c_bus;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 
 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -2575,7 +2575,7 @@
 	bool valid;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 
 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -2666,7 +2666,7 @@
 	bool valid;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f29..fd8c4d3 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,6 +62,10 @@
 	return radeon_atpx_priv.atpx_detected;
 }
 
+bool radeon_has_atpx_dgpu_power_cntl(void)
+{
+	return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -141,10 +145,6 @@
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
-	/* make sure required functions are enabled */
-	/* dGPU power control is required */
-	atpx->functions.power_cntl = true;
-
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
@@ -551,13 +551,14 @@
 void radeon_register_atpx_handler(void)
 {
 	bool r;
+	enum vga_switcheroo_handler_flags_t handler_flags = 0;
 
 	/* detect if we have any ATPX + 2 VGA in the system */
 	r = radeon_atpx_detect();
 	if (!r)
 		return;
 
-	vga_switcheroo_register_handler(&radeon_atpx_handler);
+	vga_switcheroo_register_handler(&radeon_atpx_handler, handler_flags);
 }
 
 /**
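
vga_switcheroo_register_handler() now takes a flags word as its second argument; radeon passes 0, i.e. it advertises no special switching abilities of its own. A sketch of what a handler that can mux the DDC lines independently would pass instead (the capability check is hypothetical):

	enum vga_switcheroo_handler_flags_t handler_flags = 0;

	if (can_switch_ddc)	/* hypothetical capability check */
		handler_flags |= VGA_SWITCHEROO_CAN_SWITCH_DDC;

	vga_switcheroo_register_handler(&radeon_atpx_handler, handler_flags);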
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 340f3f5..cfcc099 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -34,6 +34,7 @@
 #include "atom.h"
 
 #include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
 
 static int radeon_dp_handle_hpd(struct drm_connector *connector)
 {
@@ -344,6 +345,11 @@
 		else if (radeon_connector->ddc_bus)
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
 							      &radeon_connector->ddc_bus->adapter);
+	} else if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC &&
+		   connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+		   radeon_connector->ddc_bus) {
+		radeon_connector->edid = drm_get_edid_switcheroo(&radeon_connector->base,
+								 &radeon_connector->ddc_bus->adapter);
 	} else if (radeon_connector->ddc_bus) {
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base,
 						      &radeon_connector->ddc_bus->adapter);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4197ca1..4fd1a96 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,12 @@
 	"LAST",
 };
 
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
@@ -1155,9 +1161,9 @@
 		radeon_vm_size = 4;
 	}
 
-       /*
-        * Max GPUVM size for Cayman, SI and CI are 40 bits.
-        */
+	/*
+	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
+	 */
 	if (radeon_vm_size > 1024) {
 		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
 			 radeon_vm_size);
@@ -1433,7 +1439,7 @@
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 
-	if (rdev->flags & RADEON_IS_PX)
+	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
 		runtime = true;
 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
 	if (runtime)
@@ -1895,7 +1901,7 @@
 	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
 		DRM_ERROR("Reached maximum number of debugfs components.\n");
 		DRM_ERROR("Report so we increase "
-		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
 		return -EINVAL;
 	}
 	rdev->debugfs[rdev->debugfs_count].files = files;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 2d9196a..fcc7483 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -407,7 +407,7 @@
 	unsigned repcnt = 4;
 	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
-        down_read(&rdev->exclusive_lock);
+	down_read(&rdev->exclusive_lock);
 	if (work->fence) {
 		struct radeon_fence *fence;
 
@@ -919,7 +919,7 @@
 	*den /= tmp;
 
 	/* make sure nominator is large enough */
-        if (*nom < nom_min) {
+	if (*nom < nom_min) {
 		tmp = DIV_ROUND_UP(nom_min, *nom);
 		*nom *= tmp;
 		*den *= tmp;
@@ -959,7 +959,7 @@
 	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
 	/* limit fb divider to its maximum */
-        if (*fb_div > fb_div_max) {
+	if (*fb_div > fb_div_max) {
 		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
 		*fb_div = fb_div_max;
 	}
@@ -1683,10 +1683,8 @@
 	/* setup afmt */
 	radeon_afmt_init(rdev);
 
-	if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
-		radeon_fbdev_init(rdev);
-		drm_kms_helper_poll_init(rdev->ddev);
-	}
+	radeon_fbdev_init(rdev);
+	drm_kms_helper_poll_init(rdev->ddev);
 
 	/* do pm late init */
 	ret = radeon_pm_late_init(rdev);
@@ -1699,6 +1697,9 @@
 	radeon_fbdev_fini(rdev);
 	kfree(rdev->mode_info.bios_hardcoded_edid);
 
+	/* free i2c buses */
+	radeon_i2c_fini(rdev);
+
 	if (rdev->mode_info.mode_config_initialized) {
 		radeon_afmt_fini(rdev);
 		drm_kms_helper_poll_fini(rdev->ddev);
@@ -1706,8 +1707,6 @@
 		drm_mode_config_cleanup(rdev->ddev);
 		rdev->mode_info.mode_config_initialized = false;
 	}
-	/* free i2c buses */
-	radeon_i2c_fini(rdev);
 }
 
 static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e266ffc..ccd4ad4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -34,9 +34,11 @@
 #include "radeon_drv.h"
 
 #include <drm/drm_pciids.h>
+#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_gem.h>
 
@@ -319,6 +321,23 @@
 {
 	int ret;
 
+	/*
+	 * Initialize amdkfd before starting radeon. If it was not loaded yet,
+	 * defer radeon probing
+	 */
+	ret = radeon_kfd_init();
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
+	/*
+	 * apple-gmux is needed on dual GPU MacBook Pro
+	 * to probe the panel if we're the inactive GPU.
+	 */
+	if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+	    apple_gmux_present() && pdev != vga_default_device() &&
+	    !vga_switcheroo_handler_flags())
+		return -EPROBE_DEFER;
+
 	/* Get rid of things like offb */
 	ret = radeon_kick_out_firmware_fb(pdev);
 	if (ret)
@@ -570,8 +589,6 @@
 		return -EINVAL;
 	}
 
-	radeon_kfd_init();
-
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
 }
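
Both -EPROBE_DEFER returns added to the probe path above rely on the driver core's deferred-probe machinery: a probe that returns that code is parked and retried after other drivers load, instead of failing outright. A minimal sketch of the pattern (probe and check names hypothetical):

	static int example_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
	{
		/* dependency (amdkfd, apple-gmux, ...) not ready yet:
		 * the core will re-run this probe later */
		if (!dependency_ready())
			return -EPROBE_DEFER;

		return 0;	/* normal probe path continues here */
	}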
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d2e628e..0e3143a 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -38,9 +38,9 @@
 #include <linux/vga_switcheroo.h>
 
 /* object hierarchy -
-   this contains a helper + a radeon fb
-   the helper contains a pointer to radeon framebuffer baseclass.
-*/
+ * this contains a helper + a radeon fb
+ * the helper contains a pointer to radeon framebuffer baseclass.
+ */
 struct radeon_fbdev {
 	struct drm_fb_helper helper;
 	struct radeon_framebuffer rfb;
@@ -292,7 +292,8 @@
 
 void radeon_fb_output_poll_changed(struct radeon_device *rdev)
 {
-	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
 }
 
 static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
@@ -325,6 +326,10 @@
 	int bpp_sel = 32;
 	int ret;
 
+	/* don't enable fbdev if no connectors */
+	if (list_empty(&rdev->ddev->mode_config.connector_list))
+		return 0;
+
 	/* select 8 bpp console on RN50 or 16MB cards */
 	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
 		bpp_sel = 8;
@@ -377,11 +382,15 @@
 
 void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
 {
-	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+	if (rdev->mode_info.rfbdev)
+		fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
 }
 
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
 {
+	if (!rdev->mode_info.rfbdev)
+		return false;
+
 	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
 		return true;
 	return false;
@@ -389,12 +398,14 @@
 
 void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
 {
-	drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
 }
 
 void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
 {
-	drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+	if (rdev->mode_info.rfbdev)
+		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
 }
 
 void radeon_fbdev_restore_mode(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 05815c4..7ef075a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -527,6 +527,46 @@
 }
 
 /**
+ * radeon_fence_wait_timeout - wait for a fence to signal with timeout
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns the remaining time in jiffies if the sequence number has passed,
+ * 0 if the wait timed out, or an error for all other cases.
+ */
+long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
+{
+	uint64_t seq[RADEON_NUM_RINGS] = {};
+	long r;
+	int r_sig;
+
+	/*
+	 * This function should not be called on !radeon fences.
+	 * If this is the case, it would mean this function can
+	 * also be called on radeon fences belonging to another card.
+	 * exclusive_lock is not held in that case.
+	 */
+	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
+		return fence_wait(&fence->base, intr);
+
+	seq[fence->ring] = fence->seq;
+	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
+	if (r <= 0)
+		return r;
+
+	r_sig = fence_signal(&fence->base);
+	if (!r_sig)
+		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+	return r;
+}
+
+/**
  * radeon_fence_wait - wait for a fence to signal
  *
  * @fence: radeon fence object
@@ -539,28 +579,12 @@
  */
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
-	uint64_t seq[RADEON_NUM_RINGS] = {};
-	long r;
-
-	/*
-	 * This function should not be called on !radeon fences.
-	 * If this is the case, it would mean this function can
-	 * also be called on radeon fences belonging to another card.
-	 * exclusive_lock is not held in that case.
-	 */
-	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
-		return fence_wait(&fence->base, intr);
-
-	seq[fence->ring] = fence->seq;
-	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
+	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+	if (r > 0) {
+		return 0;
+	} else {
 		return r;
 	}
-
-	r = fence_signal(&fence->base);
-	if (!r)
-		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
-	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index c39ce1f..92ce0e5 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -274,7 +274,7 @@
 			if (i == RADEON_RING_TYPE_GFX_INDEX) {
 				/* oh, oh, that's really bad */
 				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
-		                rdev->accel_working = false;
+				rdev->accel_working = false;
 				return r;
 
 			} else {
@@ -304,7 +304,7 @@
 }
 
 static struct drm_info_list radeon_debugfs_sa_list[] = {
-        {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
 };
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 9a4d69e..87a9ebb 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -132,35 +132,34 @@
 
 static const struct kgd2kfd_calls *kgd2kfd;
 
-bool radeon_kfd_init(void)
+int radeon_kfd_init(void)
 {
+	int ret;
+
 #if defined(CONFIG_HSA_AMD_MODULE)
-	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+	int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
 
 	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
 
 	if (kgd2kfd_init_p == NULL)
-		return false;
+		return -ENOENT;
 
-	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
+	if (ret) {
 		symbol_put(kgd2kfd_init);
 		kgd2kfd = NULL;
-
-		return false;
 	}
 
-	return true;
 #elif defined(CONFIG_HSA_AMD)
-	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
+	if (ret)
 		kgd2kfd = NULL;
 
-		return false;
-	}
-
-	return true;
 #else
-	return false;
+	ret = -ENOENT;
 #endif
+
+	return ret;
 }
 
 void radeon_kfd_fini(void)
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/radeon/radeon_kfd.h
index 1103f90..9df1fea 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.h
+++ b/drivers/gpu/drm/radeon/radeon_kfd.h
@@ -33,7 +33,7 @@
 
 struct radeon_device;
 
-bool radeon_kfd_init(void);
+int radeon_kfd_init(void);
 void radeon_kfd_fini(void);
 
 void radeon_kfd_suspend(struct radeon_device *rdev);
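
The bool-to-int conversion of radeon_kfd_init() is what lets the probe path above tell "amdkfd unavailable" apart from "amdkfd not loaded yet". A hedged sketch of a caller honoring that split, assuming some init path can report -EPROBE_DEFER:

	int ret = radeon_kfd_init();	/* 0, -ENOENT, -EPROBE_DEFER, ... */

	if (ret == -EPROBE_DEFER)
		return ret;	/* retry this probe once amdkfd loads */

	/* any other failure is non-fatal: KFD support is optional and
	 * the driver keeps probing without it */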
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 32b338f..24152df 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -331,13 +331,13 @@
 									 RADEON_CRTC_DISP_REQ_EN_B));
 			WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
 		}
-		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		drm_vblank_on(dev, radeon_crtc->crtc_id);
 		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		drm_vblank_off(dev, radeon_crtc->crtc_id);
 		if (radeon_crtc->crtc_id)
 			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
 		else {
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 88dc973..868c3ba 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -818,52 +818,52 @@
 	tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
 		~(RADEON_TMDS_TRANSMITTER_PLLRST);
 
-    if (rdev->family == CHIP_R200 ||
-	rdev->family == CHIP_R100 ||
-	ASIC_IS_R300(rdev))
-	    tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
-    else /* RV chips got this bit reversed */
-	    tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+	if (rdev->family == CHIP_R200 ||
+	    rdev->family == CHIP_R100 ||
+	    ASIC_IS_R300(rdev))
+		tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+	else /* RV chips got this bit reversed */
+		tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
 
-    fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
-		   (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
-		    RADEON_FP_CRTC_DONT_SHADOW_HEND));
+	fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+		      (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+		       RADEON_FP_CRTC_DONT_SHADOW_HEND));
 
-    fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+	fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
 
-    fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
-		     RADEON_FP_DFP_SYNC_SEL |
-		     RADEON_FP_CRT_SYNC_SEL |
-		     RADEON_FP_CRTC_LOCK_8DOT |
-		     RADEON_FP_USE_SHADOW_EN |
-		     RADEON_FP_CRTC_USE_SHADOW_VEND |
-		     RADEON_FP_CRT_SYNC_ALT);
+	fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+			 RADEON_FP_DFP_SYNC_SEL |
+			 RADEON_FP_CRT_SYNC_SEL |
+			 RADEON_FP_CRTC_LOCK_8DOT |
+			 RADEON_FP_USE_SHADOW_EN |
+			 RADEON_FP_CRTC_USE_SHADOW_VEND |
+			 RADEON_FP_CRT_SYNC_ALT);
 
-    if (1) /*  FIXME rgbBits == 8 */
-	    fp_gen_cntl |= RADEON_FP_PANEL_FORMAT;  /* 24 bit format */
-    else
-	    fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
+	if (1) /*  FIXME rgbBits == 8 */
+		fp_gen_cntl |= RADEON_FP_PANEL_FORMAT;  /* 24 bit format */
+	else
+		fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
 
-    if (radeon_crtc->crtc_id == 0) {
-	    if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
-		    fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
-		    if (radeon_encoder->rmx_type != RMX_OFF)
-			    fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
-		    else
-			    fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
-	    } else
-		    fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
-    } else {
-	    if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
-		    fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
-		    fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
-	    } else
-		    fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
-    }
+	if (radeon_crtc->crtc_id == 0) {
+		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+			else
+				fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+		} else
+			fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
+	} else {
+		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+			fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+		} else
+			fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+	}
 
-    WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
-    WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
-    WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+	WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+	WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
 
 	if (rdev->is_atom_bios)
 		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fb6ad14..dd46c38 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -214,8 +214,8 @@
 	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->va);
 	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
-	                               RADEON_GEM_DOMAIN_GTT |
-	                               RADEON_GEM_DOMAIN_CPU);
+				       RADEON_GEM_DOMAIN_GTT |
+				       RADEON_GEM_DOMAIN_CPU);
 
 	bo->flags = flags;
 	/* PCI GART is always snooped */
@@ -848,7 +848,7 @@
  *
  */
 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
-                     bool shared)
+		     bool shared)
 {
 	struct reservation_object *resv = bo->tbo.resv;
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 7a98823..38226d9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -79,7 +79,7 @@
 				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
 		}
 		mutex_unlock(&rdev->pm.mutex);
-        } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (rdev->pm.profile == PM_PROFILE_AUTO) {
 			mutex_lock(&rdev->pm.mutex);
 			radeon_pm_update_profile(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index e6ad54c..b0eb28e 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -56,7 +56,7 @@
 }
 
 bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
-			          struct radeon_semaphore *semaphore)
+				  struct radeon_semaphore *semaphore)
 {
 	struct radeon_ring *ring = &rdev->ring[ridx];
 
@@ -73,7 +73,7 @@
 }
 
 bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
-			        struct radeon_semaphore *semaphore)
+				struct radeon_semaphore *semaphore)
 {
 	struct radeon_ring *ring = &rdev->ring[ridx];
 
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6edcb54..6fe9e4e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -722,9 +722,11 @@
 	return r;
 }
 
-/* multiple fence commands without any stream commands in between can
-   crash the vcpu so just try to emmit a dummy create/destroy msg to
-   avoid this */
+/*
+ * multiple fence commands without any stream commands in between can
+ * crash the vcpu so just try to emit a dummy create/destroy msg to
+ * avoid this
+ */
 int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 			      uint32_t handle, struct radeon_fence **fence)
 {
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 7eb1ae7..c1c619f 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -166,7 +166,7 @@
 	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
 		atomic_set(&rdev->vce.handles[i], 0);
 		rdev->vce.filp[i] = NULL;
-        }
+	}
 
 	return 0;
 }
@@ -389,7 +389,7 @@
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
-	        DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 	}
 
 	if (fence)
@@ -446,7 +446,7 @@
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
-	        DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 	}
 
 	if (fence)
@@ -769,18 +769,18 @@
 	radeon_ring_unlock_commit(rdev, ring, false);
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
-	        if (vce_v1_0_get_rptr(rdev, ring) != rptr)
-	                break;
-	        DRM_UDELAY(1);
+		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
+			break;
+		DRM_UDELAY(1);
 	}
 
 	if (i < rdev->usec_timeout) {
-	        DRM_INFO("ring test on %d succeeded in %d usecs\n",
-	                 ring->idx, i);
+		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+			 ring->idx, i);
 	} else {
-	        DRM_ERROR("radeon: ring %d test failed\n",
-	                  ring->idx);
-	        r = -ETIMEDOUT;
+		DRM_ERROR("radeon: ring %d test failed\n",
+			  ring->idx);
+		r = -ETIMEDOUT;
 	}
 
 	return r;
@@ -810,11 +810,16 @@
 		goto error;
 	}
 
-	r = radeon_fence_wait(fence, false);
-	if (r) {
+	r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		r = -ETIMEDOUT;
 	} else {
-	        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+		r = 0;
 	}
 error:
 	radeon_fence_unref(&fence);
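
The hunk above carries the one functional change in this file: the VCE IB test no longer blocks indefinitely in radeon_fence_wait() but bounds the wait and folds the result back into the usual negative-errno convention. A minimal sketch of that convention, assuming radeon_fence_wait_timeout() follows the common kernel timeout-wait contract (negative errno on failure, 0 on timeout, remaining jiffies on success); the helper name wait_ib_test_fence() is hypothetical:

	static int wait_ib_test_fence(struct radeon_fence *fence)
	{
		long r = radeon_fence_wait_timeout(fence, false,
				usecs_to_jiffies(RADEON_USEC_IB_TEST_TIMEOUT));

		if (r < 0)
			return r;		/* the wait itself failed */
		if (r == 0)
			return -ETIMEDOUT;	/* fence never signalled */
		return 0;			/* signalled within the budget */
	}
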
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 3979632..a135874 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -611,15 +611,16 @@
  */
 static uint32_t radeon_vm_page_flags(uint32_t flags)
 {
-        uint32_t hw_flags = 0;
-        hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
-        hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
-        hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
-        if (flags & RADEON_VM_PAGE_SYSTEM) {
-                hw_flags |= R600_PTE_SYSTEM;
-                hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
-        }
-        return hw_flags;
+	uint32_t hw_flags = 0;
+
+	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+	if (flags & RADEON_VM_PAGE_SYSTEM) {
+		hw_flags |= R600_PTE_SYSTEM;
+		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+	}
+	return hw_flags;
 }
 
 /**
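
radeon_vm_page_flags() above is a pure bit translation from the RADEON_VM_PAGE_* flags handed in by userspace to the R600_PTE_* bits the hardware page tables want, with the snooped bit only honoured for system pages. A hypothetical spot check of that mapping, using only names visible in the hunk:

	/* a snooped system page must pick up both SYSTEM and SNOOPED bits */
	u32 hw = radeon_vm_page_flags(RADEON_VM_PAGE_VALID |
				      RADEON_VM_PAGE_SYSTEM |
				      RADEON_VM_PAGE_SNOOPED);
	/* hw == (R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED) */
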
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index cb0afe7..94b48fc 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -795,7 +795,7 @@
 	union pplib_clock_info *clock_info;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	struct igp_ps *ps;
 
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 97e5a6f..25e2930 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -209,7 +209,7 @@
 
 static bool rv6xx_can_step_post_div(struct radeon_device *rdev,
 				    struct rv6xx_sclk_stepping *cur,
-                                    struct rv6xx_sclk_stepping *target)
+				    struct rv6xx_sclk_stepping *target)
 {
 	return (cur->post_divider > target->post_divider) &&
 		((cur->vco_frequency * target->post_divider) <=
@@ -239,7 +239,7 @@
 
 static void rv6xx_generate_steps(struct radeon_device *rdev,
 				 u32 low, u32 high,
-                                 u32 start_index, u8 *end_index)
+				 u32 start_index, u8 *end_index)
 {
 	struct rv6xx_sclk_stepping cur;
 	struct rv6xx_sclk_stepping target;
@@ -1356,23 +1356,23 @@
 	enum radeon_dpm_event_src dpm_event_src;
 
 	switch (sources) {
-        case 0:
-        default:
+	case 0:
+	default:
 		want_thermal_protection = false;
 		break;
-        case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
 		want_thermal_protection = true;
 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
 		break;
 
-        case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
 		want_thermal_protection = true;
 		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
 		break;
 
-        case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
 	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
-		want_thermal_protection = true;
+		want_thermal_protection = true;
 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
 		break;
 	}
@@ -1879,7 +1879,7 @@
 	union pplib_clock_info *clock_info;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	struct rv6xx_ps *ps;
 
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
index c4c8da5..4b85082 100644
--- a/drivers/gpu/drm/radeon/rv740_dpm.c
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -36,28 +36,28 @@
 	u32 ref = 0;
 
 	switch (encoded_ref) {
-        case 0:
+	case 0:
 		ref = 1;
 		break;
-        case 16:
+	case 16:
 		ref = 2;
 		break;
-        case 17:
+	case 17:
 		ref = 3;
 		break;
-        case 18:
+	case 18:
 		ref = 2;
 		break;
-        case 19:
+	case 19:
 		ref = 3;
 		break;
-        case 20:
+	case 20:
 		ref = 4;
 		break;
-        case 21:
+	case 21:
 		ref = 5;
 		break;
-        default:
+	default:
 		DRM_ERROR("Invalid encoded Reference Divider\n");
 		ref = 0;
 		break;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index e830c89..a010dec 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -345,27 +345,27 @@
 	int ret = 0;
 
 	switch (postdiv) {
-        case 1:
+	case 1:
 		*encoded_postdiv = 0;
 		break;
-        case 2:
+	case 2:
 		*encoded_postdiv = 1;
 		break;
-        case 4:
+	case 4:
 		*encoded_postdiv = 2;
 		break;
-        case 8:
+	case 8:
 		*encoded_postdiv = 3;
 		break;
-        case 16:
+	case 16:
 		*encoded_postdiv = 4;
 		break;
-        default:
+	default:
 		ret = -EINVAL;
 		break;
 	}
 
-    return ret;
+	return ret;
 }
 
 u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
@@ -1175,15 +1175,15 @@
 	rv770_populate_smc_mvdd_table(rdev, table);
 
 	switch (rdev->pm.int_thermal_type) {
-        case THERMAL_TYPE_RV770:
-        case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
+	case THERMAL_TYPE_RV770:
+	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
 		break;
-        case THERMAL_TYPE_NONE:
+	case THERMAL_TYPE_NONE:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
 		break;
-        case THERMAL_TYPE_EXTERNAL_GPIO:
-        default:
+	case THERMAL_TYPE_EXTERNAL_GPIO:
+	default:
 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
 		break;
 	}
@@ -1567,18 +1567,18 @@
 	sw_smio_index =
 		(RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT;
 	switch (sw_smio_index) {
-        case 3:
+	case 3:
 		vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL);
 		break;
-        case 2:
+	case 2:
 		vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL);
 		break;
-        case 1:
+	case 1:
 		vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL);
 		break;
-        case 0:
+	case 0:
 		return;
-        default:
+	default:
 		vid_smio_cntl = pi->s0_vid_lower_smio_cntl;
 		break;
 	}
@@ -1817,21 +1817,21 @@
 	enum radeon_dpm_event_src dpm_event_src;
 
 	switch (sources) {
-        case 0:
-        default:
+	case 0:
+	default:
 		want_thermal_protection = false;
 		break;
-        case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
 		want_thermal_protection = true;
 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
 		break;
 
-        case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
 		want_thermal_protection = true;
 		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
 		break;
 
-        case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
 	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
 		want_thermal_protection = true;
 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
@@ -2273,7 +2273,7 @@
 	union pplib_clock_info *clock_info;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	struct rv7xx_ps *ps;
 
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f878d69..ae21550 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1307,7 +1307,7 @@
  */
 u32 si_get_xclk(struct radeon_device *rdev)
 {
-        u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 reference_clock = rdev->clock.spll.reference_freq;
 	u32 tmp;
 
 	tmp = RREG32(CG_CLKPIN_CNTL_2);
@@ -2442,8 +2442,10 @@
  */
 static void si_tiling_mode_table_init(struct radeon_device *rdev)
 {
-	const u32 num_tile_mode_states = 32;
-	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+	u32 *tile = rdev->config.si.tile_mode_array;
+	const u32 num_tile_mode_states =
+			ARRAY_SIZE(rdev->config.si.tile_mode_array);
+	u32 reg_offset, split_equal_to_row_size;
 
 	switch (rdev->config.si.mem_row_size_in_kb) {
 	case 1:
@@ -2458,491 +2460,442 @@
 		break;
 	}
 
-	if ((rdev->family == CHIP_TAHITI) ||
-	    (rdev->family == CHIP_PITCAIRN)) {
-		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:  /* non-AA compressed depth or any compressed stencil */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 1:  /* 2xAA/4xAA compressed depth only */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 2:  /* 8xAA compressed depth only */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(split_equal_to_row_size) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(split_equal_to_row_size) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
-				break;
-			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(split_equal_to_row_size) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 8:  /* 1D and 1D Array Surfaces */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 9:  /* Displayable maps. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 10:  /* Display 8bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 11:  /* Display 16bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 12:  /* Display 32bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
-				break;
-			case 13:  /* Thin. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 14:  /* Thin 8 bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
-				break;
-			case 15:  /* Thin 16 bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
-				break;
-			case 16:  /* Thin 32 bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
-				break;
-			case 17:  /* Thin 64 bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(split_equal_to_row_size) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
-				break;
-			case 21:  /* 8 bpp PRT. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 22:  /* 16 bpp PRT */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
-				break;
-			case 23:  /* 32 bpp PRT */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 24:  /* 64 bpp PRT */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 25:  /* 128 bpp PRT */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
-						 NUM_BANKS(ADDR_SURF_8_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-	} else if ((rdev->family == CHIP_VERDE) ||
-		   (rdev->family == CHIP_OLAND) ||
-		   (rdev->family == CHIP_HAINAN)) {
-		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
-			switch (reg_offset) {
-			case 0:  /* non-AA compressed depth or any compressed stencil */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
-				break;
-			case 1:  /* 2xAA/4xAA compressed depth only */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
-				break;
-			case 2:  /* 8xAA compressed depth only */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
-				break;
-			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
-				break;
-			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(split_equal_to_row_size) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(split_equal_to_row_size) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(split_equal_to_row_size) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
-				break;
-			case 8:  /* 1D and 1D Array Surfaces */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 9:  /* Displayable maps. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 10:  /* Display 8bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
-				break;
-			case 11:  /* Display 16bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 12:  /* Display 32bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 13:  /* Thin. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 14:  /* Thin 8 bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 15:  /* Thin 16 bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 16:  /* Thin 32 bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 17:  /* Thin 64 bpp. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(split_equal_to_row_size) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 21:  /* 8 bpp PRT. */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 22:  /* 16 bpp PRT */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
-				break;
-			case 23:  /* 32 bpp PRT */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 24:  /* 64 bpp PRT */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
-				break;
-			case 25:  /* 128 bpp PRT */
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
-						 NUM_BANKS(ADDR_SURF_8_BANK) |
-						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
-				break;
-			default:
-				gb_tile_moden = 0;
-				break;
-			}
-			rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
-			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
-		}
-	} else
+	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+		tile[reg_offset] = 0;
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+		/* non-AA compressed depth or any compressed stencil */
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 2xAA/4xAA compressed depth only */
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 8xAA compressed depth only */
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 1D and 1D Array Surfaces */
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Displayable maps. */
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 8bpp. */
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 16bpp. */
+		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 32bpp. */
+		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Thin. */
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 8 bpp. */
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Thin 16 bpp. */
+		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Thin 32 bpp. */
+		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* Thin 64 bpp. */
+		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+		/* 8 bpp PRT. */
+		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 16 bpp PRT */
+		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 32 bpp PRT */
+		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 64 bpp PRT */
+		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 128 bpp PRT */
+		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+			   NUM_BANKS(ADDR_SURF_8_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		break;
+
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
+		/* non-AA compressed depth or any compressed stencil */
+		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 2xAA/4xAA compressed depth only */
+		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 8xAA compressed depth only */
+		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 1D and 1D Array Surfaces */
+		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Displayable maps. */
+		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 8bpp. */
+		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* Display 16bpp. */
+		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Display 32bpp. */
+		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin. */
+		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 8 bpp. */
+		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 16 bpp. */
+		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 32 bpp. */
+		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* Thin 64 bpp. */
+		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+			   TILE_SPLIT(split_equal_to_row_size) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 8 bpp PRT. */
+		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 16 bpp PRT */
+		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+		/* 32 bpp PRT */
+		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 64 bpp PRT */
+		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+			   NUM_BANKS(ADDR_SURF_16_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+		/* 128 bpp PRT */
+		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+			   NUM_BANKS(ADDR_SURF_8_BANK) |
+			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+		break;
+
+	default:
 		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
+	}
 }
 
 static void si_select_se_sh(struct radeon_device *rdev,
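
The rework above replaces two big per-offset switch statements with direct assignments into rdev->config.si.tile_mode_array: the whole table is zeroed first, the per-family entries are filled in, and every register is then programmed in one pass bounded by ARRAY_SIZE, so tile modes a family leaves unset still program 0, as the old default case did. A sketch of the pattern, where program_tile_table() and wreg() are hypothetical stand-ins for the inlined code and WREG32():

	static void program_tile_table(u32 *tile, unsigned n,
				       void (*wreg)(u32 reg, u32 val))
	{
		unsigned i;

		for (i = 0; i < n; i++)
			tile[i] = 0;	/* unset modes program as 0 */

		/* ... per-family tile[i] assignments go here ... */

		for (i = 0; i < n; i++)
			wreg(GB_TILE_MODE0 + (i * 4), tile[i]);
	}
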
@@ -7314,7 +7267,7 @@
 	mutex_lock(&rdev->gpu_clock_mutex);
 	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
 	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
-	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
 	mutex_unlock(&rdev->gpu_clock_mutex);
 	return clock;
 }
@@ -7775,33 +7728,33 @@
 
 int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
 {
-        unsigned i;
+	unsigned i;
 
-        /* make sure VCEPLL_CTLREQ is deasserted */
-        WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+	/* make sure VCEPLL_CTLREQ is deasserted */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
 
-        mdelay(10);
+	mdelay(10);
 
-        /* assert UPLL_CTLREQ */
-        WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+	/* assert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
 
-        /* wait for CTLACK and CTLACK2 to get asserted */
-        for (i = 0; i < 100; ++i) {
-                uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
-                if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
-                        break;
-                mdelay(10);
-        }
+	/* wait for CTLACK and CTLACK2 to get asserted */
+	for (i = 0; i < 100; ++i) {
+		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
+			break;
+		mdelay(10);
+	}
 
-        /* deassert UPLL_CTLREQ */
-        WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+	/* deassert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
 
-        if (i == 100) {
-                DRM_ERROR("Timeout setting UVD clocks!\n");
-                return -ETIMEDOUT;
-        }
+	if (i == 100) {
+		DRM_ERROR("Timeout setting UVD clocks!\n");
+		return -ETIMEDOUT;
+	}
 
-        return 0;
+	return 0;
 }
 
 int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
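
Beyond the whitespace conversion, si_vce_send_vcepll_ctlreq() above is a plain request/acknowledge handshake: deassert the request, raise it, poll for both ACK bits on a 100 x 10ms budget, drop the request again, and return -ETIMEDOUT if the acks never arrived. The polling step reduces to a generic wait-for-bits loop; a sketch, where poll_bits_set() and read_reg() are hypothetical stand-ins for the open-coded loop and RREG32_SMC():

	static int poll_bits_set(u32 (*read_reg)(u32 reg), u32 reg, u32 mask)
	{
		unsigned i;

		for (i = 0; i < 100; ++i) {
			if ((read_reg(reg) & mask) == mask)
				return 0;	/* all requested bits asserted */
			mdelay(10);		/* ~1 second total budget */
		}
		return -ETIMEDOUT;
	}
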
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a82b891..cb75ab7 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -499,7 +499,7 @@
 
 static const struct si_cac_config_reg cac_override_pitcairn[] =
 {
-    { 0xFFFFFFFF }
+	{ 0xFFFFFFFF }
 };
 
 static const struct si_powertune_data powertune_data_pitcairn =
@@ -991,7 +991,7 @@
 
 static const struct si_cac_config_reg cac_override_cape_verde[] =
 {
-    { 0xFFFFFFFF }
+	{ 0xFFFFFFFF }
 };
 
 static const struct si_powertune_data powertune_data_cape_verde =
@@ -1762,9 +1762,9 @@
 
 static struct si_power_info *si_get_pi(struct radeon_device *rdev)
 {
-        struct si_power_info *pi = rdev->pm.dpm.priv;
+	struct si_power_info *pi = rdev->pm.dpm.priv;
 
-        return pi;
+	return pi;
 }
 
 static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
@@ -3150,9 +3150,9 @@
 		}
 	}
 
-        for (i = 0; i < ps->performance_level_count; i++)
-                btc_adjust_clock_combinations(rdev, max_limits,
-                                              &ps->performance_levels[i]);
+	for (i = 0; i < ps->performance_level_count; i++)
+		btc_adjust_clock_combinations(rdev, max_limits,
+					      &ps->performance_levels[i]);
 
 	for (i = 0; i < ps->performance_level_count; i++) {
 		if (ps->performance_levels[i].vddc < min_vce_voltage)
@@ -3291,7 +3291,7 @@
 	case 0:
 	default:
 		want_thermal_protection = false;
-                break;
+		break;
 	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
 		want_thermal_protection = true;
 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
@@ -3493,7 +3493,7 @@
 	if (ret)
 		return ret;
 
-        si_pi->state_table_start = tmp;
+	si_pi->state_table_start = tmp;
 
 	ret = si_read_smc_sram_dword(rdev,
 				     SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
@@ -3652,7 +3652,7 @@
 	si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
 
 	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
-        backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
+	backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
 
 	if (voltage_response_time == 0)
 		voltage_response_time = 1000;
@@ -3760,7 +3760,7 @@
 			       &pi->pbsu);
 
 
-        pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
 	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
 
 	WREG32(CG_BSP, pi->dsp);
@@ -4308,7 +4308,7 @@
 
 	radeon_atom_set_engine_dram_timings(rdev,
 					    pl->sclk,
-                                            pl->mclk);
+					    pl->mclk);
 
 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -4343,7 +4343,7 @@
 					   si_pi->sram_end);
 		if (ret)
 			break;
-        }
+	}
 
 	return ret;
 }
@@ -4821,9 +4821,9 @@
 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
 	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
 
-        spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
-        spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
-        spll_func_cntl_3 |= SPLL_DITHEN;
+	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+	spll_func_cntl_3 |= SPLL_DITHEN;
 
 	if (pi->sclk_ss) {
 		struct radeon_atom_ss ss;
@@ -4930,15 +4930,15 @@
 		tmp = freq_nom / reference_clock;
 		tmp = tmp * tmp;
 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
-                                                     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
 			u32 clks = reference_clock * 5 / ss.rate;
 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
 
-                        mpll_ss1 &= ~CLKV_MASK;
-                        mpll_ss1 |= CLKV(clkv);
+			mpll_ss1 &= ~CLKV_MASK;
+			mpll_ss1 |= CLKV(clkv);
 
-                        mpll_ss2 &= ~CLKS_MASK;
-                        mpll_ss2 |= CLKS(clks);
+			mpll_ss2 &= ~CLKS_MASK;
+			mpll_ss2 |= CLKS(clks);
 		}
 	}
 
@@ -5265,7 +5265,7 @@
 		ni_pi->enable_power_containment = false;
 
 	ret = si_populate_sq_ramping_values(rdev, radeon_state, smc_state);
-        if (ret)
+	if (ret)
 		ni_pi->enable_sq_ramping = false;
 
 	return si_populate_smc_t(rdev, radeon_state, smc_state);
@@ -5436,46 +5436,46 @@
 	case  MC_SEQ_RAS_TIMING >> 2:
 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_CAS_TIMING >> 2:
+	case MC_SEQ_CAS_TIMING >> 2:
 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_MISC_TIMING >> 2:
+	case MC_SEQ_MISC_TIMING >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
 		break;
-        case MC_SEQ_MISC_TIMING2 >> 2:
+	case MC_SEQ_MISC_TIMING2 >> 2:
 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
 		break;
-        case MC_SEQ_RD_CTL_D0 >> 2:
+	case MC_SEQ_RD_CTL_D0 >> 2:
 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
 		break;
-        case MC_SEQ_RD_CTL_D1 >> 2:
+	case MC_SEQ_RD_CTL_D1 >> 2:
 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_D0 >> 2:
+	case MC_SEQ_WR_CTL_D0 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_D1 >> 2:
+	case MC_SEQ_WR_CTL_D1 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
 		break;
-        case MC_PMG_CMD_EMRS >> 2:
+	case MC_PMG_CMD_EMRS >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS >> 2:
+	case MC_PMG_CMD_MRS >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS1 >> 2:
+	case MC_PMG_CMD_MRS1 >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
 		break;
-        case MC_SEQ_PMG_TIMING >> 2:
+	case MC_SEQ_PMG_TIMING >> 2:
 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
 		break;
-        case MC_PMG_CMD_MRS2 >> 2:
+	case MC_PMG_CMD_MRS2 >> 2:
 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
 		break;
-        case MC_SEQ_WR_CTL_2 >> 2:
+	case MC_SEQ_WR_CTL_2 >> 2:
 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
 		break;
-        default:
+	default:
 		result = false;
 		break;
 	}
@@ -5562,19 +5562,19 @@
 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
 
-        ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
-        if (ret)
-                goto init_mc_done;
+	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+	if (ret)
+		goto init_mc_done;
 
-        ret = si_copy_vbios_mc_reg_table(table, si_table);
-        if (ret)
-                goto init_mc_done;
+	ret = si_copy_vbios_mc_reg_table(table, si_table);
+	if (ret)
+		goto init_mc_done;
 
 	si_set_s0_mc_reg_index(si_table);
 
 	ret = si_set_mc_special_registers(rdev, si_table);
-        if (ret)
-                goto init_mc_done;
+	if (ret)
+		goto init_mc_done;
 
 	si_set_valid_flag(si_table);
 
@@ -5715,10 +5715,10 @@
 
 static void si_enable_voltage_control(struct radeon_device *rdev, bool enable)
 {
-        if (enable)
-                WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
-        else
-                WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+	if (enable)
+		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+	else
+		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
 }
 
 static enum radeon_pcie_gen si_get_maximum_link_speed(struct radeon_device *rdev,
@@ -6820,7 +6820,7 @@
 	struct _NonClockInfoArray *non_clock_info_array;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 	struct ni_ps *ps;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index cd08628..f0d5c17 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -787,8 +787,8 @@
 	struct atom_clock_dividers dividers;
 	int ret;
 
-        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
-                                             pi->acpi_pl.sclk,
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     pi->acpi_pl.sclk,
 					     false, &dividers);
 	if (ret)
 		return;
@@ -1462,7 +1462,7 @@
 	struct _NonClockInfoArray *non_clock_info_array;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 	struct sumo_ps *ps;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index d34bfcda..6730367 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -369,8 +369,8 @@
 	int ret;
 	u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
 
-        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
-                                             25000, false, &dividers);
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     25000, false, &dividers);
 	if (ret)
 		return;
 
@@ -587,8 +587,8 @@
 	u32 value;
 	u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
 
-        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
-                                             sclk, false, &dividers);
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     sclk, false, &dividers);
 	if (ret)
 		return;
 
@@ -597,8 +597,8 @@
 	value |= CLK_DIVIDER(dividers.post_div);
 	WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
 
-        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
-                                             sclk/2, false, &dividers);
+	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					     sclk/2, false, &dividers);
 	if (ret)
 		return;
 
@@ -1045,14 +1045,14 @@
 	int low_temp = 0 * 1000;
 	int high_temp = 255 * 1000;
 
-        if (low_temp < min_temp)
+	if (low_temp < min_temp)
 		low_temp = min_temp;
-        if (high_temp > max_temp)
+	if (high_temp > max_temp)
 		high_temp = max_temp;
-        if (high_temp < low_temp) {
+	if (high_temp < low_temp) {
 		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
-                return -EINVAL;
-        }
+		return -EINVAL;
+	}
 
 	WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
 	WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
@@ -1737,7 +1737,7 @@
 	struct _NonClockInfoArray *non_clock_info_array;
 	union power_info *power_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-        u16 data_offset;
+	u16 data_offset;
 	u8 frev, crev;
 	u8 *power_state_offset;
 	struct sumo_ps *ps;
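
The trinity_dpm hunk above clamps the requested thermal interrupt range into the hardware limits and rejects inverted ranges before programming CG_THERMAL_INT_CTRL. A standalone sketch of that validation, with temperatures in millidegrees C as in the driver:

#include <errno.h>
#include <stdio.h>

/* Clamp a requested thermal range into the hardware limits and reject
 * inverted ranges, mirroring the checks in the trinity_dpm hunk. */
static int clamp_thermal_range(int min_temp, int max_temp,
			       int *low_temp, int *high_temp)
{
	if (*low_temp < min_temp)
		*low_temp = min_temp;
	if (*high_temp > max_temp)
		*high_temp = max_temp;
	if (*high_temp < *low_temp)
		return -EINVAL;
	return 0;
}

int main(void)
{
	int low = 0 * 1000, high = 255 * 1000;

	if (clamp_thermal_range(10000, 120000, &low, &high))
		fprintf(stderr, "invalid thermal range\n");
	else
		printf("clamped range: %d - %d\n", low, high);
	return 0;
}
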
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index c6b1cbc..12ddcfa 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -522,11 +522,17 @@
 		goto error;
 	}
 
-	r = radeon_fence_wait(fence, false);
-	if (r) {
+	r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
+		RADEON_USEC_IB_TEST_TIMEOUT));
+	if (r < 0) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		goto error;
+	} else if (r == 0) {
+		DRM_ERROR("radeon: fence wait timed out.\n");
+		r = -ETIMEDOUT;
+		goto error;
 	}
+	r = 0;
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
 	radeon_fence_unref(&fence);
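
The uvd_v1_0 hunk replaces an unbounded fence wait with radeon_fence_wait_timeout(), whose return value is tri-state: negative on error, zero on timeout, positive (time remaining) on success. The caller therefore has to fold zero into -ETIMEDOUT and positive values into plain success, as the hunk does. A sketch of that normalization, with wait_with_timeout() as a stub merely shaped like the real call:

#include <errno.h>
#include <stdio.h>

/* Stub shaped like radeon_fence_wait_timeout(): <0 error, 0 timeout,
 * >0 (jiffies remaining) success. Returns success here for the demo. */
static long wait_with_timeout(void *fence, long timeout)
{
	(void)fence;
	return timeout / 2;
}

/* Fold the tri-state result into plain 0 / -errno, as the IB test
 * above now does. */
static int wait_and_normalize(void *fence, long timeout)
{
	long r = wait_with_timeout(fence, timeout);

	if (r < 0)
		return (int)r;		/* hard error from the wait */
	if (r == 0)
		return -ETIMEDOUT;	/* expired without signaling */
	return 0;			/* signaled with time to spare */
}

int main(void)
{
	printf("wait result: %d\n", wait_and_normalize(NULL, 1000));
	return 0;
}
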
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index cdeaab7..fce2144 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -53,7 +53,7 @@
 		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
 
 		WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
-    } else {
+	} else {
 		tmp = RREG32(VCE_CLOCK_GATING_B);
 		tmp |= 0xe7;
 		tmp &= ~0xe70000;
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 96dcd4a7..1f10fa0 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,7 @@
 config DRM_RCAR_DU
 	tristate "DRM Support for R-Car Display Unit"
-	depends on DRM && ARM && OF
+	depends on DRM && OF
+	depends on ARM || ARM64
 	depends on ARCH_SHMOBILE || COMPILE_TEST
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
@@ -14,14 +15,18 @@
 config DRM_RCAR_HDMI
 	bool "R-Car DU HDMI Encoder Support"
 	depends on DRM_RCAR_DU
-	depends on OF
 	help
 	  Enable support for external HDMI encoders.
 
 config DRM_RCAR_LVDS
 	bool "R-Car DU LVDS Encoder Support"
 	depends on DRM_RCAR_DU
-	depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
 	help
-	  Enable support for the R-Car Display Unit embedded LVDS encoders
-	  (currently only on R8A7790 and R8A7791).
+	  Enable support for the R-Car Display Unit embedded LVDS encoders.
+
+config DRM_RCAR_VSP
+	bool "R-Car DU VSP Compositor Support"
+	depends on DRM_RCAR_DU
+	depends on VIDEO_RENESAS_VSP1
+	help
+	  Enable support to expose the R-Car VSP Compositor as KMS planes.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 05de1c4..827711e 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -11,4 +11,6 @@
 					   rcar_du_hdmienc.o
 rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS)	+= rcar_du_lvdsenc.o
 
+rcar-du-drm-$(CONFIG_DRM_RCAR_VSP)	+= rcar_du_vsp.o
+
 obj-$(CONFIG_DRM_RCAR_DU)		+= rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 88a4b70..d9f06cc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -1,7 +1,7 @@
 /*
  * rcar_du_crtc.c  --  R-Car Display Unit CRTCs
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -28,6 +28,7 @@
 #include "rcar_du_kms.h"
 #include "rcar_du_plane.h"
 #include "rcar_du_regs.h"
+#include "rcar_du_vsp.h"
 
 static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
 {
@@ -150,7 +151,7 @@
 	/* Signal polarities */
 	value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
 	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
-	      | DSMR_DIPM_DE | DSMR_CSPM;
+	      | DSMR_DIPM_DISP | DSMR_CSPM;
 	rcar_du_crtc_write(rcrtc, DSMR, value);
 
 	/* Display timings */
@@ -207,6 +208,7 @@
 static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
 {
 	struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
+	struct rcar_du_device *rcdu = rcrtc->group->dev;
 	unsigned int num_planes = 0;
 	unsigned int dptsr_planes;
 	unsigned int hwplanes = 0;
@@ -250,6 +252,17 @@
 		}
 	}
 
+	/* If VSP+DU integration is enabled the plane assignment is fixed. */
+	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
+		if (rcdu->info->gen < 3) {
+			dspr = (rcrtc->index % 2) + 1;
+			hwplanes = 1 << (rcrtc->index % 2);
+		} else {
+			dspr = (rcrtc->index % 2) ? 3 : 1;
+			hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
+		}
+	}
+
 	/* Update the planes to display timing and dot clock generator
 	 * associations.
 	 *
@@ -272,6 +285,10 @@
 			rcar_du_group_restart(rcrtc->group);
 	}
 
+	/* Restart the group if plane sources have changed. */
+	if (rcrtc->group->need_restart)
+		rcar_du_group_restart(rcrtc->group);
+
 	mutex_unlock(&rcrtc->group->lock);
 
 	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
@@ -282,26 +299,6 @@
  * Page Flip
  */
 
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
-				   struct drm_file *file)
-{
-	struct drm_pending_vblank_event *event;
-	struct drm_device *dev = rcrtc->crtc.dev;
-	unsigned long flags;
-
-	/* Destroy the pending vertical blanking event associated with the
-	 * pending page flip, if any, and disable vertical blanking interrupts.
-	 */
-	spin_lock_irqsave(&dev->event_lock, flags);
-	event = rcrtc->event;
-	if (event && event->base.file_priv == file) {
-		rcrtc->event = NULL;
-		event->base.destroy(&event->base);
-		drm_crtc_vblank_put(&rcrtc->crtc);
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
 {
 	struct drm_pending_vblank_event *event;
@@ -385,6 +382,10 @@
 
 	rcar_du_group_start_stop(rcrtc->group, true);
 
+	/* Enable the VSP compositor. */
+	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+		rcar_du_vsp_enable(rcrtc);
+
 	/* Turn vertical blanking interrupt reporting back on. */
 	drm_crtc_vblank_on(crtc);
 
@@ -418,6 +419,10 @@
 	rcar_du_crtc_wait_page_flip(rcrtc);
 	drm_crtc_vblank_off(crtc);
 
+	/* Disable the VSP compositor. */
+	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+		rcar_du_vsp_disable(rcrtc);
+
 	/* Select switch sync mode. This stops display operation and configures
 	 * the HSYNC and VSYNC signals as inputs.
 	 */
@@ -430,6 +435,9 @@
 
 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
 {
+	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+		rcar_du_vsp_disable(rcrtc);
+
 	rcar_du_crtc_stop(rcrtc);
 	rcar_du_crtc_put(rcrtc);
 }
@@ -438,20 +446,24 @@
 {
 	unsigned int i;
 
-	if (!rcrtc->enabled)
+	if (!rcrtc->crtc.state->active)
 		return;
 
 	rcar_du_crtc_get(rcrtc);
 	rcar_du_crtc_start(rcrtc);
 
 	/* Commit the planes state. */
-	for (i = 0; i < rcrtc->group->num_planes; ++i) {
-		struct rcar_du_plane *plane = &rcrtc->group->planes[i];
+	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) {
+		rcar_du_vsp_enable(rcrtc);
+	} else {
+		for (i = 0; i < rcrtc->group->num_planes; ++i) {
+			struct rcar_du_plane *plane = &rcrtc->group->planes[i];
 
-		if (plane->plane.state->crtc != &rcrtc->crtc)
-			continue;
+			if (plane->plane.state->crtc != &rcrtc->crtc)
+				continue;
 
-		rcar_du_plane_setup(plane);
+			rcar_du_plane_setup(plane);
+		}
 	}
 
 	rcar_du_crtc_update_planes(rcrtc);
@@ -465,37 +477,20 @@
 {
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
-	if (rcrtc->enabled)
-		return;
-
 	rcar_du_crtc_get(rcrtc);
 	rcar_du_crtc_start(rcrtc);
-
-	rcrtc->enabled = true;
 }
 
 static void rcar_du_crtc_disable(struct drm_crtc *crtc)
 {
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
-	if (!rcrtc->enabled)
-		return;
-
 	rcar_du_crtc_stop(rcrtc);
 	rcar_du_crtc_put(rcrtc);
 
-	rcrtc->enabled = false;
 	rcrtc->outputs = 0;
 }
 
-static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
-{
-	/* TODO Fixup modes */
-	return true;
-}
-
 static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
 				      struct drm_crtc_state *old_crtc_state)
 {
@@ -511,6 +506,9 @@
 		rcrtc->event = event;
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
+
+	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+		rcar_du_vsp_atomic_begin(rcrtc);
 }
 
 static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -519,10 +517,12 @@
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
 	rcar_du_crtc_update_planes(rcrtc);
+
+	if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+		rcar_du_vsp_atomic_flush(rcrtc);
 }
 
 static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
-	.mode_fixup = rcar_du_crtc_mode_fixup,
 	.disable = rcar_du_crtc_disable,
 	.enable = rcar_du_crtc_enable,
 	.atomic_begin = rcar_du_crtc_atomic_begin,
@@ -567,13 +567,14 @@
 int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 {
 	static const unsigned int mmio_offsets[] = {
-		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
+		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
 	};
 
 	struct rcar_du_device *rcdu = rgrp->dev;
 	struct platform_device *pdev = to_platform_device(rcdu->dev);
 	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
 	struct drm_crtc *crtc = &rcrtc->crtc;
+	struct drm_plane *primary;
 	unsigned int irqflags;
 	struct clk *clk;
 	char clk_name[9];
@@ -609,10 +610,13 @@
 	rcrtc->group = rgrp;
 	rcrtc->mmio_offset = mmio_offsets[index];
 	rcrtc->index = index;
-	rcrtc->enabled = false;
 
-	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc,
-					&rgrp->planes[index % 2].plane,
+	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
+		primary = &rcrtc->vsp->planes[0].plane;
+	else
+		primary = &rgrp->planes[index % 2].plane;
+
+	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary,
 					NULL, &crtc_funcs, NULL);
 	if (ret < 0)
 		return ret;
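
When VSP+DU integration is active, the plane routing computed in rcar_du_crtc_update_planes() above is fixed per CRTC rather than allocated dynamically. A standalone sketch reproducing the Gen2/Gen3 branches from the hunk:

#include <stdio.h>

/* Fixed VSP plane routing: on Gen2 each CRTC in a group takes hardware
 * plane 0 or 1; on Gen3 the odd CRTC uses plane 2 instead, so the two
 * sources never share a plane pair. */
static void vsp_fixed_routing(unsigned int crtc_index, unsigned int gen,
			      unsigned int *dspr, unsigned int *hwplanes)
{
	unsigned int odd = crtc_index % 2;

	if (gen < 3) {
		*dspr = odd + 1;
		*hwplanes = 1u << odd;
	} else {
		*dspr = odd ? 3 : 1;
		*hwplanes = 1u << (odd ? 2 : 0);
	}
}

int main(void)
{
	unsigned int dspr, hwplanes, i;

	for (i = 0; i < 4; ++i) {
		vsp_fixed_routing(i, 3, &dspr, &hwplanes);
		printf("crtc %u: DSnPR=%u hwplanes=0x%x\n", i, dspr, hwplanes);
	}
	return 0;
}
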
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 4b95d9d..6f08b7e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -21,6 +21,7 @@
 #include <drm/drm_crtc.h>
 
 struct rcar_du_group;
+struct rcar_du_vsp;
 
 /**
  * struct rcar_du_crtc - the CRTC, representing a DU superposition processor
@@ -33,7 +34,6 @@
  * @event: event to post when the pending page flip completes
  * @flip_wait: wait queue used to signal page flip completion
  * @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
- * @enabled: whether the CRTC is enabled, used to control system resume
  * @group: CRTC group this CRTC belongs to
  */
 struct rcar_du_crtc {
@@ -49,9 +49,9 @@
 	wait_queue_head_t flip_wait;
 
 	unsigned int outputs;
-	bool enabled;
 
 	struct rcar_du_group *group;
+	struct rcar_du_vsp *vsp;
 };
 
 #define to_rcar_crtc(c)	container_of(c, struct rcar_du_crtc, crtc)
@@ -67,8 +67,6 @@
 
 int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
 void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
-				   struct drm_file *file);
 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
 void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 40422f6..ed6006b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -1,7 +1,7 @@
 /*
  * rcar_du_drv.c  --  R-Car Display Unit DRM driver
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -36,6 +36,7 @@
  */
 
 static const struct rcar_du_device_info rcar_du_r8a7779_info = {
+	.gen = 2,
 	.features = 0,
 	.num_crtcs = 2,
 	.routes = {
@@ -57,6 +58,7 @@
 };
 
 static const struct rcar_du_device_info rcar_du_r8a7790_info = {
+	.gen = 2,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
 	.quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
@@ -86,6 +88,7 @@
 
 /* M2-W (r8a7791) and M2-N (r8a7793) are identical */
 static const struct rcar_du_device_info rcar_du_r8a7791_info = {
+	.gen = 2,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
 	.num_crtcs = 2,
@@ -108,6 +111,7 @@
 };
 
 static const struct rcar_du_device_info rcar_du_r8a7794_info = {
+	.gen = 2,
 	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
 		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
 	.num_crtcs = 2,
@@ -129,12 +133,37 @@
 	.num_lvds = 0,
 };
 
+static const struct rcar_du_device_info rcar_du_r8a7795_info = {
+	.gen = 3,
+	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+		  | RCAR_DU_FEATURE_EXT_CTRL_REGS
+		  | RCAR_DU_FEATURE_VSP1_SOURCE,
+	.num_crtcs = 4,
+	.routes = {
+		/* R8A7795 has one RGB output, one LVDS output and two
+		 * (currently unsupported) HDMI outputs.
+		 */
+		[RCAR_DU_OUTPUT_DPAD0] = {
+			.possible_crtcs = BIT(3),
+			.encoder_type = DRM_MODE_ENCODER_NONE,
+			.port = 0,
+		},
+		[RCAR_DU_OUTPUT_LVDS0] = {
+			.possible_crtcs = BIT(0),
+			.encoder_type = DRM_MODE_ENCODER_LVDS,
+			.port = 3,
+		},
+	},
+	.num_lvds = 1,
+};
+
 static const struct of_device_id rcar_du_of_table[] = {
 	{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
 	{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
 	{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
 	{ .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
 	{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
+	{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
 	{ }
 };
 
@@ -144,91 +173,6 @@
  * DRM operations
  */
 
-static int rcar_du_unload(struct drm_device *dev)
-{
-	struct rcar_du_device *rcdu = dev->dev_private;
-
-	if (rcdu->fbdev)
-		drm_fbdev_cma_fini(rcdu->fbdev);
-
-	drm_kms_helper_poll_fini(dev);
-	drm_mode_config_cleanup(dev);
-	drm_vblank_cleanup(dev);
-
-	dev->irq_enabled = 0;
-	dev->dev_private = NULL;
-
-	return 0;
-}
-
-static int rcar_du_load(struct drm_device *dev, unsigned long flags)
-{
-	struct platform_device *pdev = dev->platformdev;
-	struct device_node *np = pdev->dev.of_node;
-	struct rcar_du_device *rcdu;
-	struct resource *mem;
-	int ret;
-
-	if (np == NULL) {
-		dev_err(dev->dev, "no platform data\n");
-		return -ENODEV;
-	}
-
-	rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
-	if (rcdu == NULL) {
-		dev_err(dev->dev, "failed to allocate private data\n");
-		return -ENOMEM;
-	}
-
-	init_waitqueue_head(&rcdu->commit.wait);
-
-	rcdu->dev = &pdev->dev;
-	rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
-	rcdu->ddev = dev;
-	dev->dev_private = rcdu;
-
-	/* I/O resources */
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
-	if (IS_ERR(rcdu->mmio))
-		return PTR_ERR(rcdu->mmio);
-
-	/* Initialize vertical blanking interrupts handling. Start with vblank
-	 * disabled for all CRTCs.
-	 */
-	ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to initialize vblank\n");
-		goto done;
-	}
-
-	/* DRM/KMS objects */
-	ret = rcar_du_modeset_init(rcdu);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
-		goto done;
-	}
-
-	dev->irq_enabled = 1;
-
-	platform_set_drvdata(pdev, rcdu);
-
-done:
-	if (ret)
-		rcar_du_unload(dev);
-
-	return ret;
-}
-
-static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct rcar_du_device *rcdu = dev->dev_private;
-	unsigned int i;
-
-	for (i = 0; i < rcdu->num_crtcs; ++i)
-		rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
-}
-
 static void rcar_du_lastclose(struct drm_device *dev)
 {
 	struct rcar_du_device *rcdu = dev->dev_private;
@@ -269,11 +213,7 @@
 static struct drm_driver rcar_du_driver = {
 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
 				| DRIVER_ATOMIC,
-	.load			= rcar_du_load,
-	.unload			= rcar_du_unload,
-	.preclose		= rcar_du_preclose,
 	.lastclose		= rcar_du_lastclose,
-	.set_busid		= drm_platform_set_busid,
 	.get_vblank_counter	= drm_vblank_no_hw_counter,
 	.enable_vblank		= rcar_du_enable_vblank,
 	.disable_vblank		= rcar_du_disable_vblank,
@@ -333,20 +273,118 @@
  * Platform driver
  */
 
-static int rcar_du_probe(struct platform_device *pdev)
-{
-	return drm_platform_init(&rcar_du_driver, pdev);
-}
-
 static int rcar_du_remove(struct platform_device *pdev)
 {
 	struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+	struct drm_device *ddev = rcdu->ddev;
 
-	drm_put_dev(rcdu->ddev);
+	mutex_lock(&ddev->mode_config.mutex);
+	drm_connector_unplug_all(ddev);
+	mutex_unlock(&ddev->mode_config.mutex);
+
+	drm_dev_unregister(ddev);
+
+	if (rcdu->fbdev)
+		drm_fbdev_cma_fini(rcdu->fbdev);
+
+	drm_kms_helper_poll_fini(ddev);
+	drm_mode_config_cleanup(ddev);
+	drm_vblank_cleanup(ddev);
+
+	drm_dev_unref(ddev);
 
 	return 0;
 }
 
+static int rcar_du_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct rcar_du_device *rcdu;
+	struct drm_connector *connector;
+	struct drm_device *ddev;
+	struct resource *mem;
+	int ret;
+
+	if (np == NULL) {
+		dev_err(&pdev->dev, "no device tree node\n");
+		return -ENODEV;
+	}
+
+	/* Allocate and initialize the DRM and R-Car device structures. */
+	rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
+	if (rcdu == NULL)
+		return -ENOMEM;
+
+	init_waitqueue_head(&rcdu->commit.wait);
+
+	rcdu->dev = &pdev->dev;
+	rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
+
+	ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+	if (!ddev)
+		return -ENOMEM;
+
+	drm_dev_set_unique(ddev, dev_name(&pdev->dev));
+
+	rcdu->ddev = ddev;
+	ddev->dev_private = rcdu;
+
+	platform_set_drvdata(pdev, rcdu);
+
+	/* I/O resources */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(rcdu->mmio)) {
+		ret = PTR_ERR(rcdu->mmio);
+		goto error;
+	}
+
+	/* Initialize vertical blanking interrupts handling. Start with vblank
+	 * disabled for all CRTCs.
+	 */
+	ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize vblank\n");
+		goto error;
+	}
+
+	/* DRM/KMS objects */
+	ret = rcar_du_modeset_init(rcdu);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
+		goto error;
+	}
+
+	ddev->irq_enabled = 1;
+
+	/* Register the DRM device with the core and the connectors with
+	 * sysfs.
+	 */
+	ret = drm_dev_register(ddev, 0);
+	if (ret)
+		goto error;
+
+	mutex_lock(&ddev->mode_config.mutex);
+	drm_for_each_connector(connector, ddev) {
+		ret = drm_connector_register(connector);
+		if (ret < 0)
+			break;
+	}
+	mutex_unlock(&ddev->mode_config.mutex);
+
+	if (ret < 0)
+		goto error;
+
+	DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
+
+	return 0;
+
+error:
+	rcar_du_remove(pdev);
+
+	return ret;
+}
+
 static struct platform_driver rcar_du_platform_driver = {
 	.probe		= rcar_du_probe,
 	.remove		= rcar_du_remove,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 9f34fc8..ed35467 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -1,7 +1,7 @@
 /*
  * rcar_du_drv.h  --  R-Car Display Unit DRM driver
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -19,6 +19,7 @@
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_group.h"
+#include "rcar_du_vsp.h"
 
 struct clk;
 struct device;
@@ -29,6 +30,7 @@
 
 #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK	(1 << 0)	/* Per-CRTC IRQ and clock */
 #define RCAR_DU_FEATURE_EXT_CTRL_REGS	(1 << 1)	/* Has extended control registers */
+#define RCAR_DU_FEATURE_VSP1_SOURCE	(1 << 2)	/* Has inputs from VSP1 */
 
 #define RCAR_DU_QUIRK_ALIGN_128B	(1 << 0)	/* Align pitches to 128 bytes */
 #define RCAR_DU_QUIRK_LVDS_LANES	(1 << 1)	/* LVDS lanes 1 and 3 inverted */
@@ -51,6 +53,7 @@
 
 /*
  * struct rcar_du_device_info - DU model-specific information
+ * @gen: device generation (2 or 3)
  * @features: device features (RCAR_DU_FEATURE_*)
  * @quirks: device quirks (RCAR_DU_QUIRK_*)
  * @num_crtcs: total number of CRTCs
@@ -58,6 +61,7 @@
  * @num_lvds: number of internal LVDS encoders
  */
 struct rcar_du_device_info {
+	unsigned int gen;
 	unsigned int features;
 	unsigned int quirks;
 	unsigned int num_crtcs;
@@ -65,9 +69,10 @@
 	unsigned int num_lvds;
 };
 
-#define RCAR_DU_MAX_CRTCS		3
+#define RCAR_DU_MAX_CRTCS		4
 #define RCAR_DU_MAX_GROUPS		DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
 #define RCAR_DU_MAX_LVDS		2
+#define RCAR_DU_MAX_VSPS		4
 
 struct rcar_du_device {
 	struct device *dev;
@@ -82,6 +87,7 @@
 	unsigned int num_crtcs;
 
 	struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
+	struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
 
 	struct {
 		struct drm_property *alpha;
@@ -90,6 +96,8 @@
 	} props;
 
 	unsigned int dpad0_source;
+	unsigned int vspd1_sink;
+
 	struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS];
 
 	struct {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index c087007..4e939e4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -89,12 +89,8 @@
 	/* The flat panel mode is fixed, just copy it to the adjusted mode. */
 	drm_mode_copy(adjusted_mode, panel_mode);
 
-	/* The internal LVDS encoder has a clock frequency operating range of
-	 * 30MHz to 150MHz. Clamp the clock accordingly.
-	 */
 	if (renc->lvds)
-		adjusted_mode->clock = clamp(adjusted_mode->clock,
-					     30000, 150000);
+		rcar_du_lvdsenc_atomic_check(renc->lvds, adjusted_mode);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 8e2ffe0..33b2fc5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -1,7 +1,7 @@
 /*
  * rcar_du_group.c  --  R-Car Display Unit Channels Pair
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -44,29 +44,64 @@
 	rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
 }
 
+static void rcar_du_group_setup_pins(struct rcar_du_group *rgrp)
+{
+	u32 defr6 = DEFR6_CODE | DEFR6_ODPM12_DISP;
+
+	if (rgrp->num_crtcs > 1)
+		defr6 |= DEFR6_ODPM22_DISP;
+
+	rcar_du_group_write(rgrp, DEFR6, defr6);
+}
+
 static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
 {
-	u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
+	struct rcar_du_device *rcdu = rgrp->dev;
+	unsigned int possible_crtcs =
+		rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs;
+	u32 defr8 = DEFR8_CODE;
 
-	/* The DEFR8 register for the first group also controls RGB output
-	 * routing to DPAD0 for DU instances that support it.
-	 */
-	if (rgrp->dev->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs > 1 &&
-	    rgrp->index == 0)
-		defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
+	if (rcdu->info->gen < 3) {
+		defr8 |= DEFR8_DEFE8;
+
+		/* On Gen2 the DEFR8 register for the first group also controls
+		 * RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 for
+		 * DU instances that support it.
+		 */
+		if (rgrp->index == 0) {
+			if (possible_crtcs > 1)
+				defr8 |= DEFR8_DRGBS_DU(rcdu->dpad0_source);
+			if (rgrp->dev->vspd1_sink == 2)
+				defr8 |= DEFR8_VSCS;
+		}
+	} else {
+		/* On Gen3 VSPD routing can't be configured, but DPAD routing
+		 * needs to be set despite having a single option available.
+		 */
+		u32 crtc = ffs(possible_crtcs) - 1;
+
+		if (crtc / 2 == rgrp->index)
+			defr8 |= DEFR8_DRGBS_DU(crtc);
+	}
 
 	rcar_du_group_write(rgrp, DEFR8, defr8);
 }
 
 static void rcar_du_group_setup(struct rcar_du_group *rgrp)
 {
+	struct rcar_du_device *rcdu = rgrp->dev;
+
 	/* Enable extended features */
 	rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
-	rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
-	rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
-	rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
+	if (rcdu->info->gen < 3) {
+		rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
+		rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
+		rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
+	}
 	rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
 
+	rcar_du_group_setup_pins(rgrp);
+
 	if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
 		rcar_du_group_setup_defr8(rgrp);
 
@@ -82,6 +117,9 @@
 				    DIDSR_PDCS_CLK(0, 0));
 	}
 
+	if (rcdu->info->gen >= 3)
+		rcar_du_group_write(rgrp, DEFR10, DEFR10_CODE | DEFR10_DEFE10);
+
 	/* Use DS1PR and DS2PR to configure plane priorities and connect the
 	 * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
 	 */
@@ -158,21 +196,23 @@
 
 void rcar_du_group_restart(struct rcar_du_group *rgrp)
 {
+	rgrp->need_restart = false;
+
 	__rcar_du_group_start_stop(rgrp, false);
 	__rcar_du_group_start_stop(rgrp, true);
 }
 
-static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
+int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
 {
 	int ret;
 
 	if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS))
 		return 0;
 
-	/* RGB output routing to DPAD0 is configured in the DEFR8 register of
-	 * the first group. As this function can be called with the DU0 and DU1
-	 * CRTCs disabled, we need to enable the first group clock before
-	 * accessing the register.
+	/* RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 are
+	 * configured in the DEFR8 register of the first group. As this function
+	 * can be called with the DU0 and DU1 CRTCs disabled, we need to enable
+	 * the first group clock before accessing the register.
 	 */
 	ret = clk_prepare_enable(rcdu->crtcs[0].clock);
 	if (ret < 0)
@@ -203,5 +243,5 @@
 
 	rcar_du_group_write(rgrp, DORCR, dorcr);
 
-	return rcar_du_set_dpad0_routing(rgrp->dev);
+	return rcar_du_set_dpad0_vsp1_routing(rgrp->dev);
 }
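
On Gen3 the DEFR8 hunk above picks the single CRTC that can drive DPAD0 with ffs() and programs the register only from the group owning that CRTC (two CRTCs per group). A standalone sketch of that selection:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Return nonzero when @group_index is the group that must program
 * DEFR8_DRGBS_DU() for DPAD0, and store the selected CRTC. */
static int gen3_dpad0_crtc(unsigned int possible_crtcs,
			   unsigned int group_index, unsigned int *crtc)
{
	if (!possible_crtcs)
		return 0;
	*crtc = ffs(possible_crtcs) - 1;
	return *crtc / 2 == group_index;
}

int main(void)
{
	unsigned int crtc;

	/* R8A7795 routes DPAD0 to CRTC 3, which lives in group 1. */
	if (gen3_dpad0_crtc(1u << 3, 1, &crtc))
		printf("group 1 sets DEFR8_DRGBS_DU(%u)\n", crtc);
	return 0;
}
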
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index d7318e1..5e3adc6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -32,6 +32,7 @@
  * @dptsr_planes: bitmask of planes driven by dot-clock and timing generator 1
  * @num_planes: number of planes in the group
  * @planes: planes handled by the group
+ * @need_restart: the group needs to be restarted due to a configuration change
  */
 struct rcar_du_group {
 	struct rcar_du_device *dev;
@@ -47,6 +48,7 @@
 
 	unsigned int num_planes;
 	struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES];
+	bool need_restart;
 };
 
 u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg);
@@ -58,4 +60,6 @@
 void rcar_du_group_restart(struct rcar_du_group *rgrp);
 int rcar_du_group_set_routing(struct rcar_du_group *rgrp);
 
+int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu);
+
 #endif /* __RCAR_DU_GROUP_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index a37b6e2..6c92714 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -55,12 +55,6 @@
 	.best_encoder = rcar_du_connector_best_encoder,
 };
 
-static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
 static enum drm_connector_status
 rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -79,7 +73,7 @@
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = rcar_du_hdmi_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = rcar_du_hdmi_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
@@ -108,9 +102,6 @@
 		return ret;
 
 	drm_connector_helper_add(connector, &connector_helper_funcs);
-	ret = drm_connector_register(connector);
-	if (ret < 0)
-		return ret;
 
 	connector->dpms = DRM_MODE_DPMS_OFF;
 	drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 2567efc..461662d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -71,12 +71,9 @@
 	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
 	const struct drm_display_mode *mode = &crtc_state->mode;
 
-	/* The internal LVDS encoder has a clock frequency operating range of
-	 * 30MHz to 150MHz. Clamp the clock accordingly.
-	 */
 	if (hdmienc->renc->lvds)
-		adjusted_mode->clock = clamp(adjusted_mode->clock,
-					     30000, 150000);
+		rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
+					     adjusted_mode);
 
 	if (sfuncs->mode_fixup == NULL)
 		return 0;
@@ -134,12 +131,19 @@
 
 	/* Locate the slave I2C device and driver. */
 	i2c_slave = of_find_i2c_device_by_node(np);
-	if (!i2c_slave || !i2c_get_clientdata(i2c_slave))
+	if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
+		dev_dbg(rcdu->dev,
+			"can't get I2C slave for %s, deferring probe\n",
+			of_node_full_name(np));
 		return -EPROBE_DEFER;
+	}
 
 	hdmienc->dev = &i2c_slave->dev;
 
 	if (hdmienc->dev->driver == NULL) {
+		dev_dbg(rcdu->dev,
+			"I2C slave %s not probed yet, deferring probe\n",
+			dev_name(hdmienc->dev));
 		ret = -EPROBE_DEFER;
 		goto error;
 	}
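
The hdmienc hunk above attaches dev_dbg() breadcrumbs to its two -EPROBE_DEFER paths, which previously failed silently. A userspace sketch of the idiom (EPROBE_DEFER is a kernel-internal errno, defined here only so the example compiles; the slave name is hypothetical):

#include <errno.h>
#include <stdio.h>

#ifndef EPROBE_DEFER
#define EPROBE_DEFER 517	/* kernel-internal value, not in userspace */
#endif

/* When a dependency isn't bound yet, log the reason at debug level and
 * return -EPROBE_DEFER so the driver core retries the probe once more
 * devices have registered. */
static int check_slave_ready(const char *name, int bound)
{
	if (!bound) {
		fprintf(stderr, "I2C slave %s not probed yet, deferring\n",
			name);
		return -EPROBE_DEFER;
	}
	return 0;
}

int main(void)
{
	return check_slave_ready("adv7511", 0) == -EPROBE_DEFER ? 0 : 1;
}
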
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 43bce69..24725bf 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -1,7 +1,7 @@
 /*
  * rcar_du_kms.c  --  R-Car Display Unit Mode Setting
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -28,6 +28,7 @@
 #include "rcar_du_kms.h"
 #include "rcar_du_lvdsenc.h"
 #include "rcar_du_regs.h"
+#include "rcar_du_vsp.h"
 
 /* -----------------------------------------------------------------------------
  * Format helpers
@@ -89,13 +90,44 @@
 		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
 		.edf = PnDDCR4_EDF_NONE,
 	}, {
-		/* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
 		.fourcc = DRM_FORMAT_NV16,
 		.bpp = 16,
 		.planes = 2,
 		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
 		.edf = PnDDCR4_EDF_NONE,
 	},
+	/* The following formats are not supported on Gen2 and thus have no
+	 * associated .pnmr or .edf settings.
+	 */
+	{
+		.fourcc = DRM_FORMAT_NV61,
+		.bpp = 16,
+		.planes = 2,
+	}, {
+		.fourcc = DRM_FORMAT_YUV420,
+		.bpp = 12,
+		.planes = 3,
+	}, {
+		.fourcc = DRM_FORMAT_YVU420,
+		.bpp = 12,
+		.planes = 3,
+	}, {
+		.fourcc = DRM_FORMAT_YUV422,
+		.bpp = 16,
+		.planes = 3,
+	}, {
+		.fourcc = DRM_FORMAT_YVU422,
+		.bpp = 16,
+		.planes = 3,
+	}, {
+		.fourcc = DRM_FORMAT_YUV444,
+		.bpp = 24,
+		.planes = 3,
+	}, {
+		.fourcc = DRM_FORMAT_YVU444,
+		.bpp = 24,
+		.planes = 3,
+	},
 };
 
 const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
@@ -143,6 +175,7 @@
 	unsigned int max_pitch;
 	unsigned int align;
 	unsigned int bpp;
+	unsigned int i;
 
 	format = rcar_du_format_info(mode_cmd->pixel_format);
 	if (format == NULL) {
@@ -155,7 +188,7 @@
 	 * The pitch and alignment constraints are expressed in pixels on the
 	 * hardware side and in bytes in the DRM API.
 	 */
-	bpp = format->planes == 2 ? 1 : format->bpp / 8;
+	bpp = format->planes == 1 ? format->bpp / 8 : 1;
 	max_pitch =  4096 * bpp;
 
 	if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
@@ -170,8 +203,8 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (format->planes == 2) {
-		if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
+	for (i = 1; i < format->planes; ++i) {
+		if (mode_cmd->pitches[i] != mode_cmd->pitches[0]) {
 			dev_dbg(dev->dev,
 				"luma and chroma pitches do not match\n");
 			return ERR_PTR(-EINVAL);
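
The framebuffer validation above generalizes from a hard-coded two-plane check to a loop over all planes, and converts the 4096-pixel hardware pitch limit to bytes: whole-pixel bytes for packed formats, one byte per pixel for planar formats. A standalone sketch of the check:

#include <stdio.h>

/* Validate pitches the way rcar_du_fb_create() does after the hunk:
 * the luma pitch must fit the 4096-pixel limit and every chroma plane
 * must match the luma pitch. */
static int check_pitches(unsigned int planes, unsigned int bpp,
			 const unsigned int *pitches)
{
	unsigned int bytes_pp = planes == 1 ? bpp / 8 : 1;
	unsigned int max_pitch = 4096 * bytes_pp;
	unsigned int i;

	if (pitches[0] > max_pitch)
		return -1;
	for (i = 1; i < planes; ++i)
		if (pitches[i] != pitches[0])
			return -1;
	return 0;
}

int main(void)
{
	unsigned int p[3] = { 1920, 1920, 1920 };

	printf("YUV420, 1920-byte pitch: %s\n",
	       check_pitches(3, 12, p) ? "rejected" : "ok");
	return 0;
}
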
@@ -192,252 +225,20 @@
  * Atomic Check and Update
  */
 
-/*
- * Atomic hardware plane allocator
- *
- * The hardware plane allocator is solely based on the atomic plane states
- * without keeping any external state to avoid races between .atomic_check()
- * and .atomic_commit().
- *
- * The core idea is to avoid using a free planes bitmask that would need to be
- * shared between check and commit handlers with a collective knowledge based on
- * the allocated hardware plane(s) for each KMS plane. The allocator then loops
- * over all plane states to compute the free planes bitmask, allocates hardware
- * planes based on that bitmask, and stores the result back in the plane states.
- *
- * For this to work we need to access the current state of planes not touched by
- * the atomic update. To ensure that it won't be modified, we need to lock all
- * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
- * updates from .atomic_check() up to completion (when swapping the states if
- * the check step has succeeded) or rollback (when freeing the states if the
- * check step has failed).
- *
- * Allocation is performed in the .atomic_check() handler and applied
- * automatically when the core swaps the old and new states.
- */
-
-static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
-					struct rcar_du_plane_state *state)
-{
-	const struct rcar_du_format_info *cur_format;
-
-	cur_format = to_rcar_plane_state(plane->plane.state)->format;
-
-	/* Lowering the number of planes doesn't strictly require reallocation
-	 * as the extra hardware plane will be freed when committing, but doing
-	 * so could lead to more fragmentation.
-	 */
-	return !cur_format || cur_format->planes != state->format->planes;
-}
-
-static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
-{
-	unsigned int mask;
-
-	if (state->hwindex == -1)
-		return 0;
-
-	mask = 1 << state->hwindex;
-	if (state->format->planes == 2)
-		mask |= 1 << ((state->hwindex + 1) % 8);
-
-	return mask;
-}
-
-static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
-{
-	unsigned int i;
-
-	for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
-		if (!(free & (1 << i)))
-			continue;
-
-		if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
-			break;
-	}
-
-	return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
-}
-
 static int rcar_du_atomic_check(struct drm_device *dev,
 				struct drm_atomic_state *state)
 {
 	struct rcar_du_device *rcdu = dev->dev_private;
-	unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
-	unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
-	bool needs_realloc = false;
-	unsigned int groups = 0;
-	unsigned int i;
 	int ret;
 
 	ret = drm_atomic_helper_check(dev, state);
 	if (ret < 0)
 		return ret;
 
-	/* Check if hardware planes need to be reallocated. */
-	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
-		struct rcar_du_plane_state *plane_state;
-		struct rcar_du_plane *plane;
-		unsigned int index;
-
-		if (!state->planes[i])
-			continue;
-
-		plane = to_rcar_plane(state->planes[i]);
-		plane_state = to_rcar_plane_state(state->plane_states[i]);
-
-		dev_dbg(rcdu->dev, "%s: checking plane (%u,%u)\n", __func__,
-			plane->group->index, plane - plane->group->planes);
-
-		/* If the plane is being disabled we don't need to go through
-		 * the full reallocation procedure. Just mark the hardware
-		 * plane(s) as freed.
-		 */
-		if (!plane_state->format) {
-			dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
-				__func__);
-			index = plane - plane->group->planes;
-			group_freed_planes[plane->group->index] |= 1 << index;
-			plane_state->hwindex = -1;
-			continue;
-		}
-
-		/* If the plane needs to be reallocated mark it as such, and
-		 * mark the hardware plane(s) as free.
-		 */
-		if (rcar_du_plane_needs_realloc(plane, plane_state)) {
-			dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
-				__func__);
-			groups |= 1 << plane->group->index;
-			needs_realloc = true;
-
-			index = plane - plane->group->planes;
-			group_freed_planes[plane->group->index] |= 1 << index;
-			plane_state->hwindex = -1;
-		}
-	}
-
-	if (!needs_realloc)
+	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
 		return 0;
 
-	/* Grab all plane states for the groups that need reallocation to ensure
-	 * locking and avoid racy updates. This serializes the update operation,
-	 * but there's not much we can do about it as that's the hardware
-	 * design.
-	 *
-	 * Compute the used planes mask for each group at the same time to avoid
-	 * looping over the planes separately later.
-	 */
-	while (groups) {
-		unsigned int index = ffs(groups) - 1;
-		struct rcar_du_group *group = &rcdu->groups[index];
-		unsigned int used_planes = 0;
-
-		dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n",
-			__func__, index);
-
-		for (i = 0; i < group->num_planes; ++i) {
-			struct rcar_du_plane *plane = &group->planes[i];
-			struct rcar_du_plane_state *plane_state;
-			struct drm_plane_state *s;
-
-			s = drm_atomic_get_plane_state(state, &plane->plane);
-			if (IS_ERR(s))
-				return PTR_ERR(s);
-
-			/* If the plane has been freed in the above loop its
-			 * hardware planes must not be added to the used planes
-			 * bitmask. However, the current state doesn't reflect
-			 * the free state yet, as we've modified the new state
-			 * above. Use the local freed planes list to check for
-			 * that condition instead.
-			 */
-			if (group_freed_planes[index] & (1 << i)) {
-				dev_dbg(rcdu->dev,
-					"%s: plane (%u,%u) has been freed, skipping\n",
-					__func__, plane->group->index,
-					plane - plane->group->planes);
-				continue;
-			}
-
-			plane_state = to_rcar_plane_state(plane->plane.state);
-			used_planes |= rcar_du_plane_hwmask(plane_state);
-
-			dev_dbg(rcdu->dev,
-				"%s: plane (%u,%u) uses %u hwplanes (index %d)\n",
-				__func__, plane->group->index,
-				plane - plane->group->planes,
-				plane_state->format ?
-				plane_state->format->planes : 0,
-				plane_state->hwindex);
-		}
-
-		group_free_planes[index] = 0xff & ~used_planes;
-		groups &= ~(1 << index);
-
-		dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
-			__func__, index, group_free_planes[index]);
-	}
-
-	/* Reallocate hardware planes for each plane that needs it. */
-	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
-		struct rcar_du_plane_state *plane_state;
-		struct rcar_du_plane *plane;
-		unsigned int crtc_planes;
-		unsigned int free;
-		int idx;
-
-		if (!state->planes[i])
-			continue;
-
-		plane = to_rcar_plane(state->planes[i]);
-		plane_state = to_rcar_plane_state(state->plane_states[i]);
-
-		dev_dbg(rcdu->dev, "%s: allocating plane (%u,%u)\n", __func__,
-			plane->group->index, plane - plane->group->planes);
-
-		/* Skip planes that are being disabled or don't need to be
-		 * reallocated.
-		 */
-		if (!plane_state->format ||
-		    !rcar_du_plane_needs_realloc(plane, plane_state))
-			continue;
-
-		/* Try to allocate the plane from the free planes currently
-		 * associated with the target CRTC to avoid restarting the CRTC
-		 * group and thus minimize flicker. If it fails fall back to
-		 * allocating from all free planes.
-		 */
-		crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2
-			    ? plane->group->dptsr_planes
-			    : ~plane->group->dptsr_planes;
-		free = group_free_planes[plane->group->index];
-
-		idx = rcar_du_plane_hwalloc(plane_state->format->planes,
-					    free & crtc_planes);
-		if (idx < 0)
-			idx = rcar_du_plane_hwalloc(plane_state->format->planes,
-						    free);
-		if (idx < 0) {
-			dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
-				__func__);
-			return idx;
-		}
-
-		dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
-			__func__, plane_state->format->planes, idx);
-
-		plane_state->hwindex = idx;
-
-		group_free_planes[plane->group->index] &=
-			~rcar_du_plane_hwmask(plane_state);
-
-		dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
-			__func__, plane->group->index,
-			group_free_planes[plane->group->index]);
-	}
-
-	return 0;
+	return rcar_du_atomic_check_planes(dev, state);
 }
 
 struct rcar_du_commit {
@@ -456,7 +257,7 @@
 	/* Apply the atomic update. */
 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
-	drm_atomic_helper_commit_planes(dev, old_state, false);
+	drm_atomic_helper_commit_planes(dev, old_state, true);
 
 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
 
@@ -775,14 +576,34 @@
 		rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U);
 
 		/* If we have more than one CRTC in this group, pre-associate
-		 * planes 0-3 with CRTC 0 and planes 4-7 with CRTC 1 to minimize
-		 * flicker occurring when the association is changed.
+		 * the low-order planes with CRTC 0 and the high-order planes
+		 * with CRTC 1 to minimize flicker occurring when the
+		 * association is changed.
 		 */
-		rgrp->dptsr_planes = rgrp->num_crtcs > 1 ? 0xf0 : 0;
+		rgrp->dptsr_planes = rgrp->num_crtcs > 1
+				   ? (rcdu->info->gen >= 3 ? 0x04 : 0xf0)
+				   : 0;
 
-		ret = rcar_du_planes_init(rgrp);
-		if (ret < 0)
-			return ret;
+		if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
+			ret = rcar_du_planes_init(rgrp);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	/* Initialize the compositors. */
+	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
+		for (i = 0; i < rcdu->num_crtcs; ++i) {
+			struct rcar_du_vsp *vsp = &rcdu->vsps[i];
+
+			vsp->index = i;
+			vsp->dev = rcdu;
+			rcdu->crtcs[i].vsp = vsp;
+
+			ret = rcar_du_vsp_init(vsp);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
 	/* Create the CRTCs. */
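
The initial DPTSR association in this hunk is generation-dependent: Gen2 groups pre-assign planes 4-7 to the second CRTC (0xf0), while Gen3 VSP-fed groups only need hardware plane 2 there (0x04), matching the fixed routing shown earlier. A one-function sketch:

#include <stdio.h>

/* Initial plane/CRTC association for a group: single-CRTC groups keep
 * everything on CRTC 0; two-CRTC groups split the planes so switching
 * the association later causes minimal flicker. */
static unsigned int initial_dptsr(unsigned int num_crtcs, unsigned int gen)
{
	if (num_crtcs <= 1)
		return 0;
	return gen >= 3 ? 0x04 : 0xf0;
}

int main(void)
{
	printf("gen2 pair: 0x%02x, gen3 pair: 0x%02x\n",
	       initial_dptsr(2, 2), initial_dptsr(2, 3));
	return 0;
}
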
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 0c43032..e905f5d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -62,12 +62,6 @@
 	.best_encoder = rcar_du_connector_best_encoder,
 };
 
-static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
 static enum drm_connector_status
 rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -79,7 +73,7 @@
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = rcar_du_lvds_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = rcar_du_lvds_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
@@ -117,9 +111,6 @@
 		return ret;
 
 	drm_connector_helper_add(connector, &connector_helper_funcs);
-	ret = drm_connector_register(connector);
-	if (ret < 0)
-		return ret;
 
 	connector->dpms = DRM_MODE_DPMS_OFF;
 	drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index 85043c5..ef3a503 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -38,14 +38,97 @@
 	iowrite32(data, lvds->mmio + reg);
 }
 
-static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
-				 struct rcar_du_crtc *rcrtc)
+static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
+				       struct rcar_du_crtc *rcrtc)
 {
 	const struct drm_display_mode *mode = &rcrtc->crtc.mode;
 	unsigned int freq = mode->clock;
 	u32 lvdcr0;
-	u32 lvdhcr;
 	u32 pllcr;
+
+	/* PLL clock configuration */
+	if (freq < 39000)
+		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
+	else if (freq < 61000)
+		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
+	else if (freq < 121000)
+		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
+	else
+		pllcr = LVDPLLCR_PLLDLYCNT_150M;
+
+	rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+
+	/* Select the input, hardcode mode 0, enable LVDS operation and turn
+	 * bias circuitry on.
+	 */
+	lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
+	if (rcrtc->index == 2)
+		lvdcr0 |= LVDCR0_DUSEL;
+	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+	/* Turn all the channels on. */
+	rcar_lvds_write(lvds, LVDCR1,
+			LVDCR1_CHSTBY_GEN2(3) | LVDCR1_CHSTBY_GEN2(2) |
+			LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) |
+			LVDCR1_CLKSTBY_GEN2);
+
+	/* Turn the PLL on, wait for the startup delay, and turn the output
+	 * on.
+	 */
+	lvdcr0 |= LVDCR0_PLLON;
+	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+	usleep_range(100, 150);
+
+	lvdcr0 |= LVDCR0_LVRES;
+	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+}
+
+static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
+				       struct rcar_du_crtc *rcrtc)
+{
+	const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+	unsigned int freq = mode->clock;
+	u32 lvdcr0;
+	u32 pllcr;
+
+	/* PLL clock configuration */
+	if (freq < 42000)
+		pllcr = LVDPLLCR_PLLDIVCNT_42M;
+	else if (freq < 85000)
+		pllcr = LVDPLLCR_PLLDIVCNT_85M;
+	else if (freq < 128000)
+		pllcr = LVDPLLCR_PLLDIVCNT_128M;
+	else
+		pllcr = LVDPLLCR_PLLDIVCNT_148M;
+
+	rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+
+	/* Turn the PLL on, set it to LVDS normal mode, wait for the startup
+	 * delay and turn the output on.
+	 */
+	lvdcr0 = LVDCR0_PLLON;
+	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+	lvdcr0 |= LVDCR0_PWD;
+	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+	usleep_range(100, 150);
+
+	lvdcr0 |= LVDCR0_LVRES;
+	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+	/* Turn all the channels on. */
+	rcar_lvds_write(lvds, LVDCR1,
+			LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
+			LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
+			LVDCR1_CLKSTBY_GEN3);
+}
+
+static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
+				 struct rcar_du_crtc *rcrtc)
+{
+	u32 lvdhcr;
 	int ret;
 
 	if (lvds->enabled)
@@ -55,18 +138,6 @@
 	if (ret < 0)
 		return ret;
 
-	/* PLL clock configuration */
-	if (freq <= 38000)
-		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
-	else if (freq <= 60000)
-		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
-	else if (freq <= 121000)
-		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
-	else
-		pllcr = LVDPLLCR_PLLDLYCNT_150M;
-
-	rcar_lvds_write(lvds, LVDPLLCR, pllcr);
-
 	/* Hardcode the channels and control signals routing for now.
 	 *
 	 * HSYNC -> CTRL0
@@ -87,30 +158,14 @@
 
 	rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
 
-	/* Select the input, hardcode mode 0, enable LVDS operation and turn
-	 * bias circuitry on.
-	 */
-	lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
-	if (rcrtc->index == 2)
-		lvdcr0 |= LVDCR0_DUSEL;
-	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
-	/* Turn all the channels on. */
-	rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
-			LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
-
-	/* Turn the PLL on, wait for the startup delay, and turn the output
-	 * on.
-	 */
-	lvdcr0 |= LVDCR0_PLLEN;
-	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
-	usleep_range(100, 150);
-
-	lvdcr0 |= LVDCR0_LVRES;
-	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+	/* Perform generation-specific initialization. */
+	if (lvds->dev->info->gen < 3)
+		rcar_du_lvdsenc_start_gen2(lvds, rcrtc);
+	else
+		rcar_du_lvdsenc_start_gen3(lvds, rcrtc);
 
 	lvds->enabled = true;
+
 	return 0;
 }
 
@@ -140,6 +195,21 @@
 		return -EINVAL;
 }
 
+void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
+				  struct drm_display_mode *mode)
+{
+	struct rcar_du_device *rcdu = lvds->dev;
+
+	/* The internal LVDS encoder has a restricted clock frequency operating
+	 * range (30MHz to 150MHz on Gen2, 25.175MHz to 148.5MHz on Gen3). Clamp
+	 * the clock accordingly.
+	 */
+	if (rcdu->info->gen < 3)
+		mode->clock = clamp(mode->clock, 30000, 150000);
+	else
+		mode->clock = clamp(mode->clock, 25175, 148500);
+}
+
 static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
 					 struct platform_device *pdev)
 {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index 9a6001c..dfdba74 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -30,6 +30,8 @@
 int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
 int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
 			   struct drm_crtc *crtc, bool enable);
+void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
+				  struct drm_display_mode *mode);
 #else
 static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
 {
@@ -40,6 +42,10 @@
 {
 	return 0;
 }
+static inline void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
+						struct drm_display_mode *mode)
+{
+}
 #endif
 
 #endif /* __RCAR_DU_LVDSENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index c3ed952..8460ae1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -1,7 +1,7 @@
 /*
  * rcar_du_plane.c  --  R-Car Display Unit Planes
  *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -12,6 +12,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
@@ -20,21 +21,300 @@
 #include <drm/drm_plane_helper.h>
 
 #include "rcar_du_drv.h"
+#include "rcar_du_group.h"
 #include "rcar_du_kms.h"
 #include "rcar_du_plane.h"
 #include "rcar_du_regs.h"
 
+/* -----------------------------------------------------------------------------
+ * Atomic hardware plane allocator
+ *
+ * The hardware plane allocator is solely based on the atomic plane states
+ * without keeping any external state to avoid races between .atomic_check()
+ * and .atomic_commit().
+ *
+ * The core idea is to avoid a free planes bitmask that would need to be
+ * shared between the check and commit handlers; instead, the knowledge of
+ * which hardware plane(s) each KMS plane uses is kept in the plane states
+ * themselves. The allocator loops over all plane states to compute the free
+ * planes bitmask, allocates hardware planes based on that bitmask, and
+ * stores the result back in the plane states.
+ *
+ * For this to work we need to access the current state of planes not touched by
+ * the atomic update. To ensure that it won't be modified, we need to lock all
+ * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
+ * updates from .atomic_check() up to completion (when swapping the states if
+ * the check step has succeeded) or rollback (when freeing the states if the
+ * check step has failed).
+ *
+ * Allocation is performed in the .atomic_check() handler and applied
+ * automatically when the core swaps the old and new states.
+ */
+
+static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
+					struct rcar_du_plane_state *new_state)
+{
+	struct rcar_du_plane_state *cur_state;
+
+	cur_state = to_rcar_plane_state(plane->plane.state);
+
+	/* Lowering the number of planes doesn't strictly require reallocation
+	 * as the extra hardware plane will be freed when committing, but
+	 * skipping the reallocation could lead to more fragmentation.
+	 */
+	if (!cur_state->format ||
+	    cur_state->format->planes != new_state->format->planes)
+		return true;
+
+	/* Reallocate hardware planes if the source has changed. */
+	if (cur_state->source != new_state->source)
+		return true;
+
+	return false;
+}
+
+static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
+{
+	unsigned int mask;
+
+	if (state->hwindex == -1)
+		return 0;
+
+	mask = 1 << state->hwindex;
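+	/* Planar (two-plane) formats use a pair of consecutive hardware
+	 * planes, with the second index wrapping around: e.g. hwindex 7
+	 * pairs with hardware plane 0, giving the mask 0x81.
+	 */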
+	if (state->format->planes == 2)
+		mask |= 1 << ((state->hwindex + 1) % 8);
+
+	return mask;
+}
+
+/*
+ * The R8A7790 DU can source frames directly from the VSP1 devices VSPD0 and
+ * VSPD1. VSPD0 feeds DU0/1 plane 0, and VSPD1 feeds either DU2 plane 0 or
+ * DU0/1 plane 1.
+ *
+ * Allocate the correct fixed plane when sourcing frames from VSPD0 or VSPD1,
+ * and allocate planes in reverse index order otherwise to ensure maximum
+ * availability of planes 0 and 1.
+ *
+ * The caller is responsible for ensuring that the requested source is
+ * compatible with the DU revision.
+ */
+static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane,
+				 struct rcar_du_plane_state *state,
+				 unsigned int free)
+{
+	unsigned int num_planes = state->format->planes;
+	int fixed = -1;
+	int i;
+
+	if (state->source == RCAR_DU_PLANE_VSPD0) {
+		/* VSPD0 feeds plane 0 on DU0/1. */
+		if (plane->group->index != 0)
+			return -EINVAL;
+
+		fixed = 0;
+	} else if (state->source == RCAR_DU_PLANE_VSPD1) {
+		/* VSPD1 feeds plane 1 on DU0/1 or plane 0 on DU2. */
+		fixed = plane->group->index == 0 ? 1 : 0;
+	}
+
+	if (fixed >= 0)
+		return free & (1 << fixed) ? fixed : -EBUSY;
+
+	for (i = RCAR_DU_NUM_HW_PLANES - 1; i >= 0; --i) {
+		if (!(free & (1 << i)))
+			continue;
+
+		if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
+			break;
+	}
+
+	return i < 0 ? -EBUSY : i;
+}
+
+int rcar_du_atomic_check_planes(struct drm_device *dev,
+				struct drm_atomic_state *state)
+{
+	struct rcar_du_device *rcdu = dev->dev_private;
+	unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+	unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+	bool needs_realloc = false;
+	unsigned int groups = 0;
+	unsigned int i;
+
+	/* Check if hardware planes need to be reallocated. */
+	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+		struct rcar_du_plane_state *plane_state;
+		struct rcar_du_plane *plane;
+		unsigned int index;
+
+		if (!state->planes[i])
+			continue;
+
+		plane = to_rcar_plane(state->planes[i]);
+		plane_state = to_rcar_plane_state(state->plane_states[i]);
+
+		dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
+			plane->group->index, plane - plane->group->planes);
+
+		/* If the plane is being disabled we don't need to go through
+		 * the full reallocation procedure. Just mark the hardware
+		 * plane(s) as freed.
+		 */
+		if (!plane_state->format) {
+			dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
+				__func__);
+			index = plane - plane->group->planes;
+			group_freed_planes[plane->group->index] |= 1 << index;
+			plane_state->hwindex = -1;
+			continue;
+		}
+
+		/* If the plane needs to be reallocated mark it as such, and
+		 * mark the hardware plane(s) as free.
+		 */
+		if (rcar_du_plane_needs_realloc(plane, plane_state)) {
+			dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
+				__func__);
+			groups |= 1 << plane->group->index;
+			needs_realloc = true;
+
+			index = plane - plane->group->planes;
+			group_freed_planes[plane->group->index] |= 1 << index;
+			plane_state->hwindex = -1;
+		}
+	}
+
+	if (!needs_realloc)
+		return 0;
+
+	/* Grab all plane states for the groups that need reallocation to ensure
+	 * locking and avoid racy updates. This serializes the update operation,
+	 * but there's not much we can do about it as that's the hardware
+	 * design.
+	 *
+	 * Compute the used planes mask for each group at the same time to avoid
+	 * looping over the planes separately later.
+	 */
+	while (groups) {
+		unsigned int index = ffs(groups) - 1;
+		struct rcar_du_group *group = &rcdu->groups[index];
+		unsigned int used_planes = 0;
+
+		dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n",
+			__func__, index);
+
+		for (i = 0; i < group->num_planes; ++i) {
+			struct rcar_du_plane *plane = &group->planes[i];
+			struct rcar_du_plane_state *plane_state;
+			struct drm_plane_state *s;
+
+			s = drm_atomic_get_plane_state(state, &plane->plane);
+			if (IS_ERR(s))
+				return PTR_ERR(s);
+
+			/* If the plane has been freed in the above loop its
+			 * hardware planes must not be added to the used planes
+			 * bitmask. However, the current state doesn't reflect
+			 * the free state yet, as we've modified the new state
+			 * above. Use the local freed planes list to check for
+			 * that condition instead.
+			 */
+			if (group_freed_planes[index] & (1 << i)) {
+				dev_dbg(rcdu->dev,
+					"%s: plane (%u,%tu) has been freed, skipping\n",
+					__func__, plane->group->index,
+					plane - plane->group->planes);
+				continue;
+			}
+
+			plane_state = to_rcar_plane_state(plane->plane.state);
+			used_planes |= rcar_du_plane_hwmask(plane_state);
+
+			dev_dbg(rcdu->dev,
+				"%s: plane (%u,%tu) uses %u hwplanes (index %d)\n",
+				__func__, plane->group->index,
+				plane - plane->group->planes,
+				plane_state->format ?
+				plane_state->format->planes : 0,
+				plane_state->hwindex);
+		}
+
+		group_free_planes[index] = 0xff & ~used_planes;
+		groups &= ~(1 << index);
+
+		dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
+			__func__, index, group_free_planes[index]);
+	}
+
+	/* Reallocate hardware planes for each plane that needs it. */
+	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+		struct rcar_du_plane_state *plane_state;
+		struct rcar_du_plane *plane;
+		unsigned int crtc_planes;
+		unsigned int free;
+		int idx;
+
+		if (!state->planes[i])
+			continue;
+
+		plane = to_rcar_plane(state->planes[i]);
+		plane_state = to_rcar_plane_state(state->plane_states[i]);
+
+		dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
+			plane->group->index, plane - plane->group->planes);
+
+		/* Skip planes that are being disabled or don't need to be
+		 * reallocated.
+		 */
+		if (!plane_state->format ||
+		    !rcar_du_plane_needs_realloc(plane, plane_state))
+			continue;
+
+		/* Try to allocate the plane from the free planes currently
+		 * associated with the target CRTC to avoid restarting the CRTC
+		 * group and thus minimize flicker. If it fails fall back to
+		 * allocating from all free planes.
+		 */
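+		/* Planes present in dptsr_planes are associated with the
+		 * odd-indexed CRTC of the group, the other planes with the
+		 * even-indexed one.
+		 */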
+		crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2
+			    ? plane->group->dptsr_planes
+			    : ~plane->group->dptsr_planes;
+		free = group_free_planes[plane->group->index];
+
+		idx = rcar_du_plane_hwalloc(plane, plane_state,
+					    free & crtc_planes);
+		if (idx < 0)
+			idx = rcar_du_plane_hwalloc(plane, plane_state,
+						    free);
+		if (idx < 0) {
+			dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
+				__func__);
+			return idx;
+		}
+
+		dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
+			__func__, plane_state->format->planes, idx);
+
+		plane_state->hwindex = idx;
+
+		group_free_planes[plane->group->index] &=
+			~rcar_du_plane_hwmask(plane_state);
+
+		dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
+			__func__, plane->group->index,
+			group_free_planes[plane->group->index]);
+	}
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Plane Setup
+ */
+
 #define RCAR_DU_COLORKEY_NONE		(0 << 24)
 #define RCAR_DU_COLORKEY_SOURCE		(1 << 24)
 #define RCAR_DU_COLORKEY_MASK		(1 << 24)
 
-static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
-			      unsigned int index, u32 reg)
-{
-	return rcar_du_read(rgrp->dev,
-			    rgrp->mmio_offset + index * PLANE_OFF + reg);
-}
-
 static void rcar_du_plane_write(struct rcar_du_group *rgrp,
 				unsigned int index, u32 reg, u32 data)
 {
@@ -42,34 +322,45 @@
 		      data);
 }
 
-static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane)
+static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
+					const struct rcar_du_plane_state *state)
 {
-	struct rcar_du_plane_state *state =
-		to_rcar_plane_state(plane->plane.state);
-	struct drm_framebuffer *fb = plane->plane.state->fb;
-	struct rcar_du_group *rgrp = plane->group;
 	unsigned int src_x = state->state.src_x >> 16;
 	unsigned int src_y = state->state.src_y >> 16;
 	unsigned int index = state->hwindex;
-	struct drm_gem_cma_object *gem;
+	unsigned int pitch;
 	bool interlaced;
-	u32 mwr;
+	u32 dma[2];
 
 	interlaced = state->state.crtc->state->adjusted_mode.flags
 		   & DRM_MODE_FLAG_INTERLACE;
 
+	if (state->source == RCAR_DU_PLANE_MEMORY) {
+		struct drm_framebuffer *fb = state->state.fb;
+		struct drm_gem_cma_object *gem;
+		unsigned int i;
+
+		if (state->format->planes == 2)
+			pitch = fb->pitches[0];
+		else
+			pitch = fb->pitches[0] * 8 / state->format->bpp;
+
+		for (i = 0; i < state->format->planes; ++i) {
+			gem = drm_fb_cma_get_gem_obj(fb, i);
+			dma[i] = gem->paddr + fb->offsets[i];
+		}
+	} else {
+		pitch = state->state.src_w >> 16;
+		dma[0] = 0;
+		dma[1] = 0;
+	}
+
 	/* Memory pitch (expressed in pixels). Must be doubled for interlaced
 	 * operation with 32bpp formats.
 	 */
-	if (state->format->planes == 2)
-		mwr = fb->pitches[0];
-	else
-		mwr = fb->pitches[0] * 8 / state->format->bpp;
-
-	if (interlaced && state->format->bpp == 32)
-		mwr *= 2;
-
-	rcar_du_plane_write(rgrp, index, PnMWR, mwr);
+	rcar_du_plane_write(rgrp, index, PnMWR,
+			    (interlaced && state->format->bpp == 32) ?
+			    pitch * 2 : pitch);
 
 	/* The Y position is expressed in raster line units and must be doubled
 	 * for 32bpp formats, according to the R8A7790 datasheet. No mention of
@@ -87,30 +378,25 @@
 	rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
 			    (!interlaced && state->format->bpp == 32 ? 2 : 1));
 
-	gem = drm_fb_cma_get_gem_obj(fb, 0);
-	rcar_du_plane_write(rgrp, index, PnDSA0R, gem->paddr + fb->offsets[0]);
+	rcar_du_plane_write(rgrp, index, PnDSA0R, dma[0]);
 
 	if (state->format->planes == 2) {
 		index = (index + 1) % 8;
 
-		rcar_du_plane_write(rgrp, index, PnMWR, fb->pitches[0]);
+		rcar_du_plane_write(rgrp, index, PnMWR, pitch);
 
 		rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
 		rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
 				    (state->format->bpp == 16 ? 2 : 1) / 2);
 
-		gem = drm_fb_cma_get_gem_obj(fb, 1);
-		rcar_du_plane_write(rgrp, index, PnDSA0R,
-				    gem->paddr + fb->offsets[1]);
+		rcar_du_plane_write(rgrp, index, PnDSA0R, dma[1]);
 	}
 }
 
-static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
-				     unsigned int index)
+static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
+				     unsigned int index,
+				     const struct rcar_du_plane_state *state)
 {
-	struct rcar_du_plane_state *state =
-		to_rcar_plane_state(plane->plane.state);
-	struct rcar_du_group *rgrp = plane->group;
 	u32 colorkey;
 	u32 pnmr;
 
@@ -168,12 +454,10 @@
 	}
 }
 
-static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
-				  unsigned int index)
+static void rcar_du_plane_setup_format_gen2(struct rcar_du_group *rgrp,
+					    unsigned int index,
+					    const struct rcar_du_plane_state *state)
 {
-	struct rcar_du_plane_state *state =
-		to_rcar_plane_state(plane->plane.state);
-	struct rcar_du_group *rgrp = plane->group;
 	u32 ddcr2 = PnDDCR2_CODE;
 	u32 ddcr4;
 
@@ -182,11 +466,8 @@
 	 * The data format is selected by the DDDF field in PnMR and the EDF
 	 * field in DDCR4.
 	 */
-	ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
-	ddcr4 &= ~PnDDCR4_EDF_MASK;
-	ddcr4 |= state->format->edf | PnDDCR4_CODE;
 
-	rcar_du_plane_setup_mode(plane, index);
+	rcar_du_plane_setup_mode(rgrp, index, state);
 
 	if (state->format->planes == 2) {
 		if (state->hwindex != index) {
@@ -204,31 +485,72 @@
 	}
 
 	rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
+
+	ddcr4 = state->format->edf | PnDDCR4_CODE;
+	if (state->source != RCAR_DU_PLANE_MEMORY)
+		ddcr4 |= PnDDCR4_VSPS;
+
 	rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
-
-	/* Destination position and size */
-	rcar_du_plane_write(rgrp, index, PnDSXR, plane->plane.state->crtc_w);
-	rcar_du_plane_write(rgrp, index, PnDSYR, plane->plane.state->crtc_h);
-	rcar_du_plane_write(rgrp, index, PnDPXR, plane->plane.state->crtc_x);
-	rcar_du_plane_write(rgrp, index, PnDPYR, plane->plane.state->crtc_y);
-
-	/* Wrap-around and blinking, disabled */
-	rcar_du_plane_write(rgrp, index, PnWASPR, 0);
-	rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
-	rcar_du_plane_write(rgrp, index, PnBTR, 0);
-	rcar_du_plane_write(rgrp, index, PnMLR, 0);
 }
 
-void rcar_du_plane_setup(struct rcar_du_plane *plane)
+static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
+					    unsigned int index,
+					    const struct rcar_du_plane_state *state)
 {
-	struct rcar_du_plane_state *state =
-		to_rcar_plane_state(plane->plane.state);
+	rcar_du_plane_write(rgrp, index, PnMR,
+			    PnMR_SPIM_TP_OFF | state->format->pnmr);
 
-	__rcar_du_plane_setup(plane, state->hwindex);
+	rcar_du_plane_write(rgrp, index, PnDDCR4,
+			    state->format->edf | PnDDCR4_CODE);
+}
+
+static void rcar_du_plane_setup_format(struct rcar_du_group *rgrp,
+				       unsigned int index,
+				       const struct rcar_du_plane_state *state)
+{
+	struct rcar_du_device *rcdu = rgrp->dev;
+
+	if (rcdu->info->gen < 3)
+		rcar_du_plane_setup_format_gen2(rgrp, index, state);
+	else
+		rcar_du_plane_setup_format_gen3(rgrp, index, state);
+
+	/* Destination position and size */
+	rcar_du_plane_write(rgrp, index, PnDSXR, state->state.crtc_w);
+	rcar_du_plane_write(rgrp, index, PnDSYR, state->state.crtc_h);
+	rcar_du_plane_write(rgrp, index, PnDPXR, state->state.crtc_x);
+	rcar_du_plane_write(rgrp, index, PnDPYR, state->state.crtc_y);
+
+	if (rcdu->info->gen < 3) {
+		/* Wrap-around and blinking, disabled */
+		rcar_du_plane_write(rgrp, index, PnWASPR, 0);
+		rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
+		rcar_du_plane_write(rgrp, index, PnBTR, 0);
+		rcar_du_plane_write(rgrp, index, PnMLR, 0);
+	}
+}
+
+void __rcar_du_plane_setup(struct rcar_du_group *rgrp,
+			   const struct rcar_du_plane_state *state)
+{
+	struct rcar_du_device *rcdu = rgrp->dev;
+
+	rcar_du_plane_setup_format(rgrp, state->hwindex, state);
 	if (state->format->planes == 2)
-		__rcar_du_plane_setup(plane, (state->hwindex + 1) % 8);
+		rcar_du_plane_setup_format(rgrp, (state->hwindex + 1) % 8,
+					   state);
 
-	rcar_du_plane_setup_fb(plane);
+	if (rcdu->info->gen < 3)
+		rcar_du_plane_setup_scanout(rgrp, state);
+
+	if (state->source == RCAR_DU_PLANE_VSPD1) {
+		unsigned int vspd1_sink = rgrp->index ? 2 : 0;
+
+		if (rcdu->vspd1_sink != vspd1_sink) {
+			rcdu->vspd1_sink = vspd1_sink;
+			rcar_du_set_dpad0_vsp1_routing(rcdu);
+		}
+	}
 }
 
 static int rcar_du_plane_atomic_check(struct drm_plane *plane,
@@ -263,9 +585,27 @@
 					struct drm_plane_state *old_state)
 {
 	struct rcar_du_plane *rplane = to_rcar_plane(plane);
+	struct rcar_du_plane_state *old_rstate;
+	struct rcar_du_plane_state *new_rstate;
 
-	if (plane->state->crtc)
-		rcar_du_plane_setup(rplane);
+	if (!plane->state->crtc)
+		return;
+
+	rcar_du_plane_setup(rplane);
+
+	/* Check whether the source has changed from memory to live source or
+	 * from live source to memory. The source has been configured by the
+	 * VSPS bit in the PnDDCR4 register. Although the datasheet states that
+	 * the bit is updated during vertical blanking, it seems that updates
+	 * only occur when the DU group is held in reset through the DSYSR.DRES
+	 * bit. We thus need to restart the group if the source changes.
+	 */
+	old_rstate = to_rcar_plane_state(old_state);
+	new_rstate = to_rcar_plane_state(plane->state);
+
+	if ((old_rstate->source == RCAR_DU_PLANE_MEMORY) !=
+	    (new_rstate->source == RCAR_DU_PLANE_MEMORY))
+		rplane->group->need_restart = true;
 }
 
 static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = {
@@ -313,6 +653,7 @@
 		return;
 
 	state->hwindex = -1;
+	state->source = RCAR_DU_PLANE_MEMORY;
 	state->alpha = 255;
 	state->colorkey = RCAR_DU_COLORKEY_NONE;
 	state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 9732bff..b18b7b2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -28,6 +28,12 @@
 #define RCAR_DU_NUM_KMS_PLANES		9
 #define RCAR_DU_NUM_HW_PLANES		8
 
+enum rcar_du_plane_source {
+	RCAR_DU_PLANE_MEMORY,
+	RCAR_DU_PLANE_VSPD0,
+	RCAR_DU_PLANE_VSPD1,
+};
+
 struct rcar_du_plane {
 	struct drm_plane plane;
 	struct rcar_du_group *group;
@@ -52,6 +58,7 @@
 
 	const struct rcar_du_format_info *format;
 	int hwindex;
+	enum rcar_du_plane_source source;
 
 	unsigned int alpha;
 	unsigned int colorkey;
@@ -64,8 +71,20 @@
 	return container_of(state, struct rcar_du_plane_state, state);
 }
 
+int rcar_du_atomic_check_planes(struct drm_device *dev,
+				struct drm_atomic_state *state);
+
 int rcar_du_planes_init(struct rcar_du_group *rgrp);
 
-void rcar_du_plane_setup(struct rcar_du_plane *plane);
+void __rcar_du_plane_setup(struct rcar_du_group *rgrp,
+			   const struct rcar_du_plane_state *state);
+
+static inline void rcar_du_plane_setup(struct rcar_du_plane *plane)
+{
+	struct rcar_du_plane_state *state =
+		to_rcar_plane_state(plane->plane.state);
+
+	return __rcar_du_plane_setup(plane->group, state);
+}
 
 #endif /* __RCAR_DU_PLANE_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 70fcbc4..d2f6606 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -1,7 +1,7 @@
 /*
  * rcar_du_regs.h  --  R-Car Display Unit Registers Definitions
  *
- * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -16,6 +16,7 @@
 #define DU0_REG_OFFSET		0x00000
 #define DU1_REG_OFFSET		0x30000
 #define DU2_REG_OFFSET		0x40000
+#define DU3_REG_OFFSET		0x70000
 
 /* -----------------------------------------------------------------------------
  * Display Control Registers
@@ -186,7 +187,7 @@
 
 #define DEFR6			0x000e8
 #define DEFR6_CODE		(0x7778 << 16)
-#define DEFR6_ODPM22_D2SMR	(0 << 10)
+#define DEFR6_ODPM22_DSMR	(0 << 10)
 #define DEFR6_ODPM22_DISP	(2 << 10)
 #define DEFR6_ODPM22_CDE	(3 << 10)
 #define DEFR6_ODPM22_MASK	(3 << 10)
@@ -260,6 +261,21 @@
 #define DIDSR_PDCS_CLK(n, clk)	(clk << ((n) * 2))
 #define DIDSR_PDCS_MASK(n)	(3 << ((n) * 2))
 
+#define DEFR10			0x20038
+#define DEFR10_CODE		(0x7795 << 16)
+#define DEFR10_VSPF1_RGB	(0 << 14)
+#define DEFR10_VSPF1_YC		(1 << 14)
+#define DEFR10_DOCF1_RGB	(0 << 12)
+#define DEFR10_DOCF1_YC		(1 << 12)
+#define DEFR10_YCDF0_YCBCR444	(0 << 11)
+#define DEFR10_YCDF0_YCBCR422	(1 << 11)
+#define DEFR10_VSPF0_RGB	(0 << 10)
+#define DEFR10_VSPF0_YC		(1 << 10)
+#define DEFR10_DOCF0_RGB	(0 << 8)
+#define DEFR10_DOCF0_YC		(1 << 8)
+#define DEFR10_TSEL_H3_TCON1	(0 << 1) /* DEFR102 register only (DU2/DU3) */
+#define DEFR10_DEFE10		(1 << 0)
+
 /* -----------------------------------------------------------------------------
  * Display Timing Generation Registers
  */
@@ -389,6 +405,7 @@
 
 #define PnDDCR4			0x00190
 #define PnDDCR4_CODE		(0x7766 << 16)
+#define PnDDCR4_VSPS		(1 << 13)
 #define PnDDCR4_SDFS_RGB	(0 << 4)
 #define PnDDCR4_SDFS_YC		(5 << 4)
 #define PnDDCR4_SDFS_MASK	(7 << 4)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index e0a5d8f..9d7e5c9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -31,12 +31,6 @@
 	.best_encoder = rcar_du_connector_best_encoder,
 };
 
-static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
-{
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-}
-
 static enum drm_connector_status
 rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -48,7 +42,7 @@
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = rcar_du_vga_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = rcar_du_vga_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
@@ -76,9 +70,6 @@
 		return ret;
 
 	drm_connector_helper_add(connector, &connector_helper_funcs);
-	ret = drm_connector_register(connector);
-	if (ret < 0)
-		return ret;
 
 	connector->dpms = DRM_MODE_DPMS_OFF;
 	drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
new file mode 100644
index 0000000..de7ef04
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -0,0 +1,384 @@
+/*
+ * rcar_du_vsp.c  --  R-Car Display Unit VSP-Based Compositor
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/of_platform.h>
+#include <linux/videodev2.h>
+
+#include <media/vsp1.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_vsp.h"
+
+void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
+{
+	const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
+	struct rcar_du_device *rcdu = crtc->group->dev;
+	struct rcar_du_plane_state state = {
+		.state = {
+			.crtc = &crtc->crtc,
+			.crtc_x = 0,
+			.crtc_y = 0,
+			.crtc_w = mode->hdisplay,
+			.crtc_h = mode->vdisplay,
+			.src_x = 0,
+			.src_y = 0,
+			.src_w = mode->hdisplay << 16,
+			.src_h = mode->vdisplay << 16,
+		},
+		.format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
+		.source = RCAR_DU_PLANE_VSPD1,
+		.alpha = 255,
+		.colorkey = 0,
+		.zpos = 0,
+	};
+
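+	/* The VSP input feeds a fixed hardware plane: plane 0 for
+	 * even-indexed CRTCs, plane 2 (Gen3) or plane 1 (Gen2) for
+	 * odd-indexed ones.
+	 */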
+	if (rcdu->info->gen >= 3)
+		state.hwindex = (crtc->index % 2) ? 2 : 0;
+	else
+		state.hwindex = crtc->index % 2;
+
+	__rcar_du_plane_setup(crtc->group, &state);
+
+	/* Ensure that the plane source configuration takes effect by requesting
+	 * a restart of the group. See rcar_du_plane_atomic_update() for a more
+	 * detailed explanation.
+	 *
+	 * TODO: Check whether this is still needed on Gen3.
+	 */
+	crtc->group->need_restart = true;
+
+	vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay);
+}
+
+void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
+{
+	vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0);
+}
+
+void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
+{
+	vsp1_du_atomic_begin(crtc->vsp->vsp);
+}
+
+void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
+{
+	vsp1_du_atomic_flush(crtc->vsp->vsp);
+}
+
+/* Keep the two tables in sync. */
+static const u32 formats_kms[] = {
+	DRM_FORMAT_RGB332,
+	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_BGR888,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+	DRM_FORMAT_NV16,
+	DRM_FORMAT_NV61,
+	DRM_FORMAT_YUV420,
+	DRM_FORMAT_YVU420,
+	DRM_FORMAT_YUV422,
+	DRM_FORMAT_YVU422,
+	DRM_FORMAT_YUV444,
+	DRM_FORMAT_YVU444,
+};
+
+static const u32 formats_v4l2[] = {
+	V4L2_PIX_FMT_RGB332,
+	V4L2_PIX_FMT_ARGB444,
+	V4L2_PIX_FMT_XRGB444,
+	V4L2_PIX_FMT_ARGB555,
+	V4L2_PIX_FMT_XRGB555,
+	V4L2_PIX_FMT_RGB565,
+	V4L2_PIX_FMT_RGB24,
+	V4L2_PIX_FMT_BGR24,
+	V4L2_PIX_FMT_ARGB32,
+	V4L2_PIX_FMT_XRGB32,
+	V4L2_PIX_FMT_ABGR32,
+	V4L2_PIX_FMT_XBGR32,
+	V4L2_PIX_FMT_UYVY,
+	V4L2_PIX_FMT_VYUY,
+	V4L2_PIX_FMT_YUYV,
+	V4L2_PIX_FMT_YVYU,
+	V4L2_PIX_FMT_NV12M,
+	V4L2_PIX_FMT_NV21M,
+	V4L2_PIX_FMT_NV16M,
+	V4L2_PIX_FMT_NV61M,
+	V4L2_PIX_FMT_YUV420M,
+	V4L2_PIX_FMT_YVU420M,
+	V4L2_PIX_FMT_YUV422M,
+	V4L2_PIX_FMT_YVU422M,
+	V4L2_PIX_FMT_YUV444M,
+	V4L2_PIX_FMT_YVU444M,
+};
+
+static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
+{
+	struct rcar_du_vsp_plane_state *state =
+		to_rcar_vsp_plane_state(plane->plane.state);
+	struct drm_framebuffer *fb = plane->plane.state->fb;
+	struct v4l2_rect src;
+	struct v4l2_rect dst;
+	dma_addr_t paddr[2] = { 0, };
+	u32 pixelformat = 0;
+	unsigned int i;
+
+	src.left = state->state.src_x >> 16;
+	src.top = state->state.src_y >> 16;
+	src.width = state->state.src_w >> 16;
+	src.height = state->state.src_h >> 16;
+
+	dst.left = state->state.crtc_x;
+	dst.top = state->state.crtc_y;
+	dst.width = state->state.crtc_w;
+	dst.height = state->state.crtc_h;
+
+	for (i = 0; i < state->format->planes; ++i) {
+		struct drm_gem_cma_object *gem;
+
+		gem = drm_fb_cma_get_gem_obj(fb, i);
+		paddr[i] = gem->paddr + fb->offsets[i];
+	}
+
+	for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) {
+		if (formats_kms[i] == state->format->fourcc) {
+			pixelformat = formats_v4l2[i];
+			break;
+		}
+	}
+
+	WARN_ON(!pixelformat);
+
+	vsp1_du_atomic_update(plane->vsp->vsp, plane->index, pixelformat,
+			      fb->pitches[0], paddr, &src, &dst);
+}
+
+static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
+					  struct drm_plane_state *state)
+{
+	struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
+	struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
+	struct rcar_du_device *rcdu = rplane->vsp->dev;
+
+	if (!state->fb || !state->crtc) {
+		rstate->format = NULL;
+		return 0;
+	}
+
+	if (state->src_w >> 16 != state->crtc_w ||
+	    state->src_h >> 16 != state->crtc_h) {
+		dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	rstate->format = rcar_du_format_info(state->fb->pixel_format);
+	if (rstate->format == NULL) {
+		dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
+			state->fb->pixel_format);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
+					struct drm_plane_state *old_state)
+{
+	struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
+
+	if (plane->state->crtc)
+		rcar_du_vsp_plane_setup(rplane);
+	else
+		vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, 0, 0, 0,
+				      NULL, NULL);
+}
+
+static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = {
+	.atomic_check = rcar_du_vsp_plane_atomic_check,
+	.atomic_update = rcar_du_vsp_plane_atomic_update,
+};
+
+static struct drm_plane_state *
+rcar_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+	struct rcar_du_vsp_plane_state *state;
+	struct rcar_du_vsp_plane_state *copy;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	state = to_rcar_vsp_plane_state(plane->state);
+	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+	if (copy == NULL)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
+
+	return &copy->state;
+}
+
+static void rcar_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane,
+						   struct drm_plane_state *state)
+{
+	__drm_atomic_helper_plane_destroy_state(plane, state);
+	kfree(to_rcar_vsp_plane_state(state));
+}
+
+static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
+{
+	struct rcar_du_vsp_plane_state *state;
+
+	if (plane->state) {
+		rcar_du_vsp_plane_atomic_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state == NULL)
+		return;
+
+	state->alpha = 255;
+
+	plane->state = &state->state;
+	plane->state->plane = plane;
+}
+
+static int rcar_du_vsp_plane_atomic_set_property(struct drm_plane *plane,
+	struct drm_plane_state *state, struct drm_property *property,
+	uint64_t val)
+{
+	struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
+	struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
+
+	if (property == rcdu->props.alpha)
+		rstate->alpha = val;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int rcar_du_vsp_plane_atomic_get_property(struct drm_plane *plane,
+	const struct drm_plane_state *state, struct drm_property *property,
+	uint64_t *val)
+{
+	const struct rcar_du_vsp_plane_state *rstate =
+		container_of(state, const struct rcar_du_vsp_plane_state, state);
+	struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
+
+	if (property == rcdu->props.alpha)
+		*val = rstate->alpha;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.reset = rcar_du_vsp_plane_reset,
+	.set_property = drm_atomic_helper_plane_set_property,
+	.destroy = drm_plane_cleanup,
+	.atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state,
+	.atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
+	.atomic_set_property = rcar_du_vsp_plane_atomic_set_property,
+	.atomic_get_property = rcar_du_vsp_plane_atomic_get_property,
+};
+
+int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
+{
+	struct rcar_du_device *rcdu = vsp->dev;
+	struct platform_device *pdev;
+	struct device_node *np;
+	unsigned int i;
+	int ret;
+
+	/* Find the VSP device and initialize it. */
+	np = of_parse_phandle(rcdu->dev->of_node, "vsps", vsp->index);
+	if (!np) {
+		dev_err(rcdu->dev, "vsps node not found\n");
+		return -ENXIO;
+	}
+
+	pdev = of_find_device_by_node(np);
+	of_node_put(np);
+	if (!pdev)
+		return -ENXIO;
+
+	vsp->vsp = &pdev->dev;
+
+	ret = vsp1_du_init(vsp->vsp);
+	if (ret < 0)
+		return ret;
+
+	/* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
+	 * 4 RPFs.
+	 */
+	vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4;
+
+	vsp->planes = devm_kcalloc(rcdu->dev, vsp->num_planes,
+				   sizeof(*vsp->planes), GFP_KERNEL);
+	if (!vsp->planes)
+		return -ENOMEM;
+
+	for (i = 0; i < vsp->num_planes; ++i) {
+		enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY
+					 : DRM_PLANE_TYPE_PRIMARY;
+		struct rcar_du_vsp_plane *plane = &vsp->planes[i];
+
+		plane->vsp = vsp;
+		plane->index = i;
+
+		ret = drm_universal_plane_init(rcdu->ddev, &plane->plane,
+					       1 << vsp->index,
+					       &rcar_du_vsp_plane_funcs,
+					       formats_kms,
+					       ARRAY_SIZE(formats_kms), type,
+					       NULL);
+		if (ret < 0)
+			return ret;
+
+		drm_plane_helper_add(&plane->plane,
+				     &rcar_du_vsp_plane_helper_funcs);
+
+		if (type == DRM_PLANE_TYPE_PRIMARY)
+			continue;
+
+		drm_object_attach_property(&plane->plane.base,
+					   rcdu->props.alpha, 255);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
new file mode 100644
index 0000000..df3bf38
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -0,0 +1,76 @@
+/*
+ * rcar_du_vsp.h  --  R-Car Display Unit VSP-Based Compositor
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_VSP_H__
+#define __RCAR_DU_VSP_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+struct rcar_du_format_info;
+struct rcar_du_vsp;
+
+struct rcar_du_vsp_plane {
+	struct drm_plane plane;
+	struct rcar_du_vsp *vsp;
+	unsigned int index;
+};
+
+struct rcar_du_vsp {
+	unsigned int index;
+	struct device *vsp;
+	struct rcar_du_device *dev;
+	struct rcar_du_vsp_plane *planes;
+	unsigned int num_planes;
+};
+
+static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
+{
+	return container_of(p, struct rcar_du_vsp_plane, plane);
+}
+
+/**
+ * struct rcar_du_vsp_plane_state - Driver-specific plane state
+ * @state: base DRM plane state
+ * @format: information about the pixel format used by the plane
+ * @alpha: value of the plane alpha property
+ */
+struct rcar_du_vsp_plane_state {
+	struct drm_plane_state state;
+
+	const struct rcar_du_format_info *format;
+
+	unsigned int alpha;
+};
+
+static inline struct rcar_du_vsp_plane_state *
+to_rcar_vsp_plane_state(struct drm_plane_state *state)
+{
+	return container_of(state, struct rcar_du_vsp_plane_state, state);
+}
+
+#ifdef CONFIG_DRM_RCAR_VSP
+int rcar_du_vsp_init(struct rcar_du_vsp *vsp);
+void rcar_du_vsp_enable(struct rcar_du_crtc *crtc);
+void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
+void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
+void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
+#else
+static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return 0; }
+static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { }
+static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { }
+static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { }
+static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { }
+#endif
+
+#endif /* __RCAR_DU_VSP_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
index 77cf928..d7d294b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -1,7 +1,7 @@
 /*
  * rcar_lvds_regs.h  --  R-Car LVDS Interface Registers Definitions
  *
- * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
  *
  * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  *
@@ -15,28 +15,38 @@
 
 #define LVDCR0				0x0000
 #define LVDCR0_DUSEL			(1 << 15)
-#define LVDCR0_DMD			(1 << 12)
+#define LVDCR0_DMD			(1 << 12)		/* Gen2 only */
 #define LVDCR0_LVMD_MASK		(0xf << 8)
 #define LVDCR0_LVMD_SHIFT		8
-#define LVDCR0_PLLEN			(1 << 4)
-#define LVDCR0_BEN			(1 << 2)
-#define LVDCR0_LVEN			(1 << 1)
+#define LVDCR0_PLLON			(1 << 4)
+#define LVDCR0_PWD			(1 << 2)		/* Gen3 only */
+#define LVDCR0_BEN			(1 << 2)		/* Gen2 only */
+#define LVDCR0_LVEN			(1 << 1)		/* Gen2 only */
 #define LVDCR0_LVRES			(1 << 0)
 
 #define LVDCR1				0x0004
-#define LVDCR1_CKSEL			(1 << 15)
-#define LVDCR1_CHSTBY(n)		(3 << (2 + (n) * 2))
-#define LVDCR1_CLKSTBY			(3 << 0)
+#define LVDCR1_CKSEL			(1 << 15)		/* Gen2 only */
+#define LVDCR1_CHSTBY_GEN2(n)		(3 << (2 + (n) * 2))	/* Gen2 only */
+#define LVDCR1_CHSTBY_GEN3(n)		(1 << (2 + (n) * 2))	/* Gen3 only */
+#define LVDCR1_CLKSTBY_GEN2		(3 << 0)		/* Gen2 only */
+#define LVDCR1_CLKSTBY_GEN3		(1 << 0)		/* Gen3 only */
 
 #define LVDPLLCR			0x0008
 #define LVDPLLCR_CEEN			(1 << 14)
 #define LVDPLLCR_FBEN			(1 << 13)
 #define LVDPLLCR_COSEL			(1 << 12)
+/* Gen2 */
 #define LVDPLLCR_PLLDLYCNT_150M		(0x1bf << 0)
 #define LVDPLLCR_PLLDLYCNT_121M		(0x22c << 0)
 #define LVDPLLCR_PLLDLYCNT_60M		(0x77b << 0)
 #define LVDPLLCR_PLLDLYCNT_38M		(0x69a << 0)
 #define LVDPLLCR_PLLDLYCNT_MASK		(0x7ff << 0)
+/* Gen3 */
+#define LVDPLLCR_PLLDIVCNT_42M		(0x014cb << 0)
+#define LVDPLLCR_PLLDIVCNT_85M		(0x00a45 << 0)
+#define LVDPLLCR_PLLDIVCNT_128M		(0x006c3 << 0)
+#define LVDPLLCR_PLLDIVCNT_148M		(0x046c1 << 0)
+#define LVDPLLCR_PLLDIVCNT_MASK		(0x7ffff << 0)
 
 #define LVDCTRCR			0x000c
 #define LVDCTRCR_CTR3SEL_ZERO		(0 << 12)
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 8573985..76b3362 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -35,3 +35,11 @@
 	 for the Synopsys DesignWare HDMI driver. If you want to
 	 enable MIPI DSI on RK3288-based SoCs, you should select this
 	 option.
+
+config ROCKCHIP_INNO_HDMI
+	tristate "Rockchip specific extensions for Innosilicon HDMI"
+	depends on DRM_ROCKCHIP
+	help
+	  This selects support for Rockchip SoC-specific extensions
+	  for the Innosilicon HDMI driver. If you want to enable
+	  HDMI on RK3036-based SoCs, you should select this option.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index f6a809a..df8fbef 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -8,5 +8,6 @@
 
 obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
+obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
 
 obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index f8f8f29..7975158 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -875,17 +875,10 @@
 	clk_disable_unprepare(dsi->pclk);
 }
 
-static bool dw_mipi_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
-					const struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder)
 {
 	struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
-	int mux  = rockchip_drm_encoder_get_mux_id(dsi->dev->of_node, encoder);
+	int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder);
 	u32 interface_pix_fmt;
 	u32 val;
 
@@ -931,7 +924,6 @@
 
 static struct drm_encoder_helper_funcs
 dw_mipi_dsi_encoder_helper_funcs = {
-	.mode_fixup = dw_mipi_dsi_encoder_mode_fixup,
 	.commit = dw_mipi_dsi_encoder_commit,
 	.mode_set = dw_mipi_dsi_encoder_mode_set,
 	.disable = dw_mipi_dsi_encoder_disable,
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index c65ce8c..3d3cf2f 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -204,7 +204,7 @@
 	rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
 				      ROCKCHIP_OUT_MODE_AAAA);
 
-	mux = rockchip_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
+	mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
 	if (mux)
 		val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16);
 	else
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
new file mode 100644
index 0000000..10d62ff
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -0,0 +1,938 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ *    Zheng Yang <zhengyang@rock-chips.com>
+ *    Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hdmi.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#include "inno_hdmi.h"
+
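+/* Resolve the inno_hdmi that embeds the given connector or encoder; the
+ * macro argument must be the name of the corresponding struct inno_hdmi
+ * member.
+ */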
+#define to_inno_hdmi(x)	container_of(x, struct inno_hdmi, x)
+
+struct hdmi_data_info {
+	int vic;
+	bool sink_is_hdmi;
+	bool sink_has_audio;
+	unsigned int enc_in_format;
+	unsigned int enc_out_format;
+	unsigned int colorimetry;
+};
+
+struct inno_hdmi_i2c {
+	struct i2c_adapter adap;
+
+	u8 ddc_addr;
+	u8 segment_addr;
+
+	struct mutex lock;
+	struct completion cmp;
+};
+
+struct inno_hdmi {
+	struct device *dev;
+	struct drm_device *drm_dev;
+
+	int irq;
+	struct clk *pclk;
+	void __iomem *regs;
+
+	struct drm_connector	connector;
+	struct drm_encoder	encoder;
+
+	struct inno_hdmi_i2c *i2c;
+	struct i2c_adapter *ddc;
+
+	unsigned int tmds_rate;
+
+	struct hdmi_data_info	hdmi_data;
+	struct drm_display_mode previous_mode;
+};
+
+enum {
+	CSC_ITU601_16_235_TO_RGB_0_255_8BIT,
+	CSC_ITU601_0_255_TO_RGB_0_255_8BIT,
+	CSC_ITU709_16_235_TO_RGB_0_255_8BIT,
+	CSC_RGB_0_255_TO_ITU601_16_235_8BIT,
+	CSC_RGB_0_255_TO_ITU709_16_235_8BIT,
+	CSC_RGB_0_255_TO_RGB_16_235_8BIT,
+};
+
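+/* Each coefficient is stored as two big-endian bytes in sign-magnitude fixed
+ * point: bit 12 holds the sign and bits 11:0 the magnitude in 1/1024 steps,
+ * e.g. 0x04a7 = 1191/1024 ~= 1.164 and 0x1190 = -400/1024 ~= -0.391.
+ */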
+static const char coeff_csc[][24] = {
+	/*
+	 * YUV2RGB:601 SD mode(Y[16:235], UV[16:240], RGB[0:255]):
+	 *   R = 1.164*Y + 1.596*V - 204
+	 *   G = 1.164*Y - 0.391*U - 0.813*V + 154
+	 *   B = 1.164*Y + 2.018*U - 258
+	 */
+	{
+		0x04, 0xa7, 0x00, 0x00, 0x06, 0x62, 0x02, 0xcc,
+		0x04, 0xa7, 0x11, 0x90, 0x13, 0x40, 0x00, 0x9a,
+		0x04, 0xa7, 0x08, 0x12, 0x00, 0x00, 0x03, 0x02
+	},
+	/*
+	 * YUV2RGB:601 SD mode(YUV[0:255],RGB[0:255]):
+	 *   R = Y + 1.402*V - 248
+	 *   G = Y - 0.344*U - 0.714*V + 135
+	 *   B = Y + 1.772*U - 227
+	 */
+	{
+		0x04, 0x00, 0x00, 0x00, 0x05, 0x9b, 0x02, 0xf8,
+		0x04, 0x00, 0x11, 0x60, 0x12, 0xdb, 0x00, 0x87,
+		0x04, 0x00, 0x07, 0x16, 0x00, 0x00, 0x02, 0xe3
+	},
+	/*
+	 * YUV2RGB:709 HD mode(Y[16:235],UV[16:240],RGB[0:255]):
+	 *   R = 1.164*Y + 1.793*V - 248
+	 *   G = 1.164*Y - 0.213*U - 0.534*V + 77
+	 *   B = 1.164*Y + 2.115*U - 289
+	 */
+	{
+		0x04, 0xa7, 0x00, 0x00, 0x07, 0x2c, 0x02, 0xf8,
+		0x04, 0xa7, 0x10, 0xda, 0x12, 0x22, 0x00, 0x4d,
+		0x04, 0xa7, 0x08, 0x74, 0x00, 0x00, 0x03, 0x21
+	},
+
+	/*
+	 * RGB2YUV:601 SD mode:
+	 *   Cb = -0.291G - 0.148R + 0.439B + 128
+	 *   Y  = 0.504G  + 0.257R + 0.098B + 16
+	 *   Cr = -0.368G + 0.439R - 0.071B + 128
+	 */
+	{
+		0x11, 0x5f, 0x01, 0x82, 0x10, 0x23, 0x00, 0x80,
+		0x02, 0x1c, 0x00, 0xa1, 0x00, 0x36, 0x00, 0x1e,
+		0x11, 0x29, 0x10, 0x59, 0x01, 0x82, 0x00, 0x80
+	},
+	/*
+	 * RGB2YUV:709 HD mode:
+	 *   Cb = - 0.338G - 0.101R + 0.439B + 128
+	 *   Y  = 0.614G   + 0.183R + 0.062B + 16
+	 *   Cr = - 0.399G + 0.439R - 0.040B + 128
+	 */
+	{
+		0x11, 0x98, 0x01, 0xc1, 0x10, 0x28, 0x00, 0x80,
+		0x02, 0x74, 0x00, 0xbb, 0x00, 0x3f, 0x00, 0x10,
+		0x11, 0x5a, 0x10, 0x67, 0x01, 0xc1, 0x00, 0x80
+	},
+	/*
+	 * RGB[0:255]2RGB[16:235]:
+	 *   R' = R x (235-16)/255 + 16;
+	 *   G' = G x (235-16)/255 + 16;
+	 *   B' = B x (235-16)/255 + 16;
+	 */
+	{
+		0x00, 0x00, 0x03, 0x6F, 0x00, 0x00, 0x00, 0x10,
+		0x03, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+		0x00, 0x00, 0x00, 0x00, 0x03, 0x6F, 0x00, 0x10
+	},
+};
+
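+/* The registers are 32-bit aligned, and the offsets are expressed in
+ * register units, hence the multiplication by 4.
+ */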
+static inline u8 hdmi_readb(struct inno_hdmi *hdmi, u16 offset)
+{
+	return readl_relaxed(hdmi->regs + (offset) * 0x04);
+}
+
+static inline void hdmi_writeb(struct inno_hdmi *hdmi, u16 offset, u32 val)
+{
+	writel_relaxed(val, hdmi->regs + (offset) * 0x04);
+}
+
+static inline void hdmi_modb(struct inno_hdmi *hdmi, u16 offset,
+			     u32 msk, u32 val)
+{
+	u8 temp = hdmi_readb(hdmi, offset) & ~msk;
+
+	temp |= val & msk;
+	hdmi_writeb(hdmi, offset, temp);
+}
+
+static void inno_hdmi_i2c_init(struct inno_hdmi *hdmi)
+{
+	int ddc_bus_freq;
+
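+	/* Compute the DDC bus clock divider from the internal reference
+	 * (TMDS rate / 4) and the target SCL rate: e.g. with a 100kHz SCL
+	 * rate, a 148.5MHz TMDS clock yields a divider of 371.
+	 */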
+	ddc_bus_freq = (hdmi->tmds_rate >> 2) / HDMI_SCL_RATE;
+
+	hdmi_writeb(hdmi, DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
+	hdmi_writeb(hdmi, DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
+
+	/* Clear the EDID interrupt flag and mute the interrupt */
+	hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, 0);
+	hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
+}
+
+static void inno_hdmi_sys_power(struct inno_hdmi *hdmi, bool enable)
+{
+	if (enable)
+		hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_ON);
+	else
+		hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_OFF);
+}
+
+static void inno_hdmi_set_pwr_mode(struct inno_hdmi *hdmi, int mode)
+{
+	switch (mode) {
+	case NORMAL:
+		inno_hdmi_sys_power(hdmi, false);
+
+		hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x6f);
+		hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0xbb);
+
+		hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+		hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x14);
+		hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x10);
+		hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x0f);
+		hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x00);
+		hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x01);
+
+		inno_hdmi_sys_power(hdmi, true);
+		break;
+
+	case LOWER_PWR:
+		inno_hdmi_sys_power(hdmi, false);
+		hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0x00);
+		hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x00);
+		hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x00);
+		hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+
+		break;
+
+	default:
+		dev_err(hdmi->dev, "Unknown power mode %d\n", mode);
+	}
+}
+
+static void inno_hdmi_reset(struct inno_hdmi *hdmi)
+{
+	u32 val;
+	u32 msk;
+
+	hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL);
+	udelay(100);
+
+	hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG);
+	udelay(100);
+
+	msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL;
+	val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH;
+	hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val);
+
+	inno_hdmi_set_pwr_mode(hdmi, NORMAL);
+}
+
+static int inno_hdmi_upload_frame(struct inno_hdmi *hdmi, int setup_rc,
+				  union hdmi_infoframe *frame, u32 frame_index,
+				  u32 mask, u32 disable, u32 enable)
+{
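+	/* The packet is first disabled; if the infoframe was set up
+	 * successfully (setup_rc >= 0) it is packed, uploaded and
+	 * re-enabled, otherwise it is left disabled and setup_rc is
+	 * propagated to the caller.
+	 */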
+	if (mask)
+		hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, disable);
+
+	hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_BUF_INDEX, frame_index);
+
+	if (setup_rc >= 0) {
+		u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
+		ssize_t rc, i;
+
+		rc = hdmi_infoframe_pack(frame, packed_frame,
+					 sizeof(packed_frame));
+		if (rc < 0)
+			return rc;
+
+		for (i = 0; i < rc; i++)
+			hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
+				    packed_frame[i]);
+
+		if (mask)
+			hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, enable);
+	}
+
+	return setup_rc;
+}
+
+static int inno_hdmi_config_video_vsi(struct inno_hdmi *hdmi,
+				      struct drm_display_mode *mode)
+{
+	union hdmi_infoframe frame;
+	int rc;
+
+	rc = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+							 mode);
+
+	return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_VSI,
+		m_PACKET_VSI_EN, v_PACKET_VSI_EN(0), v_PACKET_VSI_EN(1));
+}
+
+static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi,
+				      struct drm_display_mode *mode)
+{
+	union hdmi_infoframe frame;
+	int rc;
+
+	rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+
+	if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
+		frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+	else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
+		frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
+	else
+		frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+
+	return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_AVI, 0, 0, 0);
+}
+
+static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
+{
+	struct hdmi_data_info *data = &hdmi->hdmi_data;
+	int c0_c2_change = 0;
+	int csc_enable = 0;
+	int csc_mode = 0;
+	int auto_csc = 0;
+	int value;
+	int i;
+
+	/* The input video mode is SDR RGB 24-bit; the data enable signal
+	 * comes from an external source.
+	 */
+	hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL1, v_DE_EXTERNAL |
+		    v_VIDEO_INPUT_FORMAT(VIDEO_INPUT_SDR_RGB444));
+
+	/* Hardcode the input color to RGB and the output color to RGB888 */
+	value = v_VIDEO_INPUT_BITS(VIDEO_INPUT_8BITS) |
+		v_VIDEO_OUTPUT_COLOR(0) |
+		v_VIDEO_INPUT_CSP(0);
+	hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL2, value);
+
+	if (data->enc_in_format == data->enc_out_format) {
+		if ((data->enc_in_format == HDMI_COLORSPACE_RGB) ||
+		    (data->enc_in_format >= HDMI_COLORSPACE_YUV444)) {
+			value = v_SOF_DISABLE | v_COLOR_DEPTH_NOT_INDICATED(1);
+			hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value);
+
+			hdmi_modb(hdmi, HDMI_VIDEO_CONTRL,
+				  m_VIDEO_AUTO_CSC | m_VIDEO_C0_C2_SWAP,
+				  v_VIDEO_AUTO_CSC(AUTO_CSC_DISABLE) |
+				  v_VIDEO_C0_C2_SWAP(C0_C2_CHANGE_DISABLE));
+			return 0;
+		}
+	}
+
+	if (data->colorimetry == HDMI_COLORIMETRY_ITU_601) {
+		if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
+		    (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
+			csc_mode = CSC_RGB_0_255_TO_ITU601_16_235_8BIT;
+			auto_csc = AUTO_CSC_DISABLE;
+			c0_c2_change = C0_C2_CHANGE_DISABLE;
+			csc_enable = v_CSC_ENABLE;
+		} else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
+			   (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
+			csc_mode = CSC_ITU601_16_235_TO_RGB_0_255_8BIT;
+			auto_csc = AUTO_CSC_ENABLE;
+			c0_c2_change = C0_C2_CHANGE_DISABLE;
+			csc_enable = v_CSC_DISABLE;
+		}
+	} else {
+		if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
+		    (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
+			csc_mode = CSC_RGB_0_255_TO_ITU709_16_235_8BIT;
+			auto_csc = AUTO_CSC_DISABLE;
+			c0_c2_change = C0_C2_CHANGE_DISABLE;
+			csc_enable = v_CSC_ENABLE;
+		} else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
+			   (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
+			csc_mode = CSC_ITU709_16_235_TO_RGB_0_255_8BIT;
+			auto_csc = AUTO_CSC_ENABLE;
+			c0_c2_change = C0_C2_CHANGE_DISABLE;
+			csc_enable = v_CSC_DISABLE;
+		}
+	}
+
+	for (i = 0; i < 24; i++)
+		hdmi_writeb(hdmi, HDMI_VIDEO_CSC_COEF + i,
+			    coeff_csc[csc_mode][i]);
+
+	value = v_SOF_DISABLE | csc_enable | v_COLOR_DEPTH_NOT_INDICATED(1);
+	hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value);
+	hdmi_modb(hdmi, HDMI_VIDEO_CONTRL, m_VIDEO_AUTO_CSC |
+		  m_VIDEO_C0_C2_SWAP, v_VIDEO_AUTO_CSC(auto_csc) |
+		  v_VIDEO_C0_C2_SWAP(c0_c2_change));
+
+	return 0;
+}
+
+static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
+					 struct drm_display_mode *mode)
+{
+	int value;
+
+	/* Set the external video timing polarities and the interlace mode */
+	value = v_EXTERANL_VIDEO(1);
+	value |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
+		 v_HSYNC_POLARITY(1) : v_HSYNC_POLARITY(0);
+	value |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
+		 v_VSYNC_POLARITY(1) : v_VSYNC_POLARITY(0);
+	value |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
+		 v_INETLACE(1) : v_INETLACE(0);
+	hdmi_writeb(hdmi, HDMI_VIDEO_TIMING_CTL, value);
+
+	/* Set the detailed external video timing */
+	value = mode->htotal;
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HTOTAL_L, value & 0xFF);
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HTOTAL_H, (value >> 8) & 0xFF);
+
+	value = mode->htotal - mode->hdisplay;
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
+
+	value = mode->hsync_start - mode->hdisplay;
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
+
+	value = mode->hsync_end - mode->hsync_start;
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDURATION_L, value & 0xFF);
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDURATION_H, (value >> 8) & 0xFF);
+
+	value = mode->vtotal;
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VTOTAL_L, value & 0xFF);
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VTOTAL_H, (value >> 8) & 0xFF);
+
+	value = mode->vtotal - mode->vdisplay;
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
+
+	value = mode->vsync_start - mode->vdisplay;
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
+
+	value = mode->vsync_end - mode->vsync_start;
+	hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDURATION, value & 0xFF);
+
+	hdmi_writeb(hdmi, HDMI_PHY_PRE_DIV_RATIO, 0x1e);
+	hdmi_writeb(hdmi, HDMI_PHY_FEEDBACK_DIV_RATIO_LOW, 0x2c);
+	hdmi_writeb(hdmi, HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH, 0x01);
+
+	return 0;
+}
+
+static int inno_hdmi_setup(struct inno_hdmi *hdmi,
+			   struct drm_display_mode *mode)
+{
+	hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
+
+	hdmi->hdmi_data.enc_in_format = HDMI_COLORSPACE_RGB;
+	hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
+
+	if ((hdmi->hdmi_data.vic == 6) || (hdmi->hdmi_data.vic == 7) ||
+	    (hdmi->hdmi_data.vic == 21) || (hdmi->hdmi_data.vic == 22) ||
+	    (hdmi->hdmi_data.vic == 2) || (hdmi->hdmi_data.vic == 3) ||
+	    (hdmi->hdmi_data.vic == 17) || (hdmi->hdmi_data.vic == 18))
+		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
+	else
+		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+
+	/* Mute video and audio output */
+	hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
+		  v_AUDIO_MUTE(1) | v_VIDEO_MUTE(1));
+
+	/* Set HDMI Mode */
+	hdmi_writeb(hdmi, HDMI_HDCP_CTRL,
+		    v_HDMI_DVI(hdmi->hdmi_data.sink_is_hdmi));
+
+	inno_hdmi_config_video_timing(hdmi, mode);
+
+	inno_hdmi_config_video_csc(hdmi);
+
+	if (hdmi->hdmi_data.sink_is_hdmi) {
+		inno_hdmi_config_video_avi(hdmi, mode);
+		inno_hdmi_config_video_vsi(hdmi, mode);
+	}
+
+	/*
+	 * Once the IP controller has been configured with accurate video
+	 * timings, the TMDS clock source is switched to DCLK_LCDC. We thus
+	 * need to initialize the TMDS rate to the mode's pixel clock rate
+	 * and reconfigure the DDC clock.
+	 */
+	hdmi->tmds_rate = mode->clock * 1000;
+	inno_hdmi_i2c_init(hdmi);
+
+	/* Unmute video and audio output */
+	hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
+		  v_AUDIO_MUTE(0) | v_VIDEO_MUTE(0));
+
+	return 0;
+}
+
+static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+				       struct drm_display_mode *mode,
+				       struct drm_display_mode *adj_mode)
+{
+	struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
+
+	inno_hdmi_setup(hdmi, adj_mode);
+
+	/* Store the display mode for hotplug/DPMS power-on events */
+	memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
+}
+
+static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
+
+	rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
+				      ROCKCHIP_OUT_MODE_P888);
+
+	inno_hdmi_set_pwr_mode(hdmi, NORMAL);
+}
+
+static void inno_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
+
+	inno_hdmi_set_pwr_mode(hdmi, LOWER_PWR);
+}
+
+static bool inno_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
+					 const struct drm_display_mode *mode,
+					 struct drm_display_mode *adj_mode)
+{
+	return true;
+}
+
+static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
+	.enable     = inno_hdmi_encoder_enable,
+	.disable    = inno_hdmi_encoder_disable,
+	.mode_fixup = inno_hdmi_encoder_mode_fixup,
+	.mode_set   = inno_hdmi_encoder_mode_set,
+};
+
+static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static enum drm_connector_status
+inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct inno_hdmi *hdmi = to_inno_hdmi(connector);
+
+	return (hdmi_readb(hdmi, HDMI_STATUS) & m_HOTPLUG) ?
+		connector_status_connected : connector_status_disconnected;
+}
+
+static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+	struct inno_hdmi *hdmi = to_inno_hdmi(connector);
+	struct edid *edid;
+	int ret = 0;
+
+	if (!hdmi->ddc)
+		return 0;
+
+	edid = drm_get_edid(connector, hdmi->ddc);
+	if (edid) {
+		hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+		hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	return ret;
+}
+
+static enum drm_mode_status
+inno_hdmi_connector_mode_valid(struct drm_connector *connector,
+			       struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+inno_hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+	struct inno_hdmi *hdmi = to_inno_hdmi(connector);
+
+	return &hdmi->encoder;
+}
+
+static int
+inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
+				       uint32_t maxX, uint32_t maxY)
+{
+	return drm_helper_probe_single_connector_modes(connector, 1920, 1080);
+}
+
+static void inno_hdmi_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs inno_hdmi_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.fill_modes = inno_hdmi_probe_single_connector_modes,
+	.detect = inno_hdmi_connector_detect,
+	.destroy = inno_hdmi_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
+	.get_modes = inno_hdmi_connector_get_modes,
+	.mode_valid = inno_hdmi_connector_mode_valid,
+	.best_encoder = inno_hdmi_connector_best_encoder,
+};
+
+static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
+{
+	struct drm_encoder *encoder = &hdmi->encoder;
+	struct device *dev = hdmi->dev;
+
+	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+
+	/*
+	 * If we failed to find the CRTC(s) which this encoder is
+	 * supposed to be connected to, it's because the CRTC has
+	 * not been registered yet.  Defer probing, and hope that
+	 * the required CRTC is added later.
+	 */
+	if (encoder->possible_crtcs == 0)
+		return -EPROBE_DEFER;
+
+	drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs);
+	drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS, NULL);
+
+	hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+	drm_connector_helper_add(&hdmi->connector,
+				 &inno_hdmi_connector_helper_funcs);
+	drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
+			   DRM_MODE_CONNECTOR_HDMIA);
+
+	drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+
+	return 0;
+}
+
+static irqreturn_t inno_hdmi_i2c_irq(struct inno_hdmi *hdmi)
+{
+	struct inno_hdmi_i2c *i2c = hdmi->i2c;
+	u8 stat;
+
+	stat = hdmi_readb(hdmi, HDMI_INTERRUPT_STATUS1);
+	if (!(stat & m_INT_EDID_READY))
+		return IRQ_NONE;
+
+	/* Clear HDMI EDID interrupt flag */
+	hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
+
+	complete(&i2c->cmp);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t inno_hdmi_hardirq(int irq, void *dev_id)
+{
+	struct inno_hdmi *hdmi = dev_id;
+	irqreturn_t ret = IRQ_NONE;
+	u8 interrupt;
+
+	if (hdmi->i2c)
+		ret = inno_hdmi_i2c_irq(hdmi);
+
+	interrupt = hdmi_readb(hdmi, HDMI_STATUS);
+	if (interrupt & m_INT_HOTPLUG) {
+		hdmi_modb(hdmi, HDMI_STATUS, m_INT_HOTPLUG, m_INT_HOTPLUG);
+		ret = IRQ_WAKE_THREAD;
+	}
+
+	return ret;
+}
+
+static irqreturn_t inno_hdmi_irq(int irq, void *dev_id)
+{
+	struct inno_hdmi *hdmi = dev_id;
+
+	drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+	return IRQ_HANDLED;
+}
+
+static int inno_hdmi_i2c_read(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
+{
+	int length = msgs->len;
+	u8 *buf = msgs->buf;
+	int ret;
+
+	ret = wait_for_completion_timeout(&hdmi->i2c->cmp, HZ / 10);
+	if (!ret)
+		return -EAGAIN;
+
+	while (length--)
+		*buf++ = hdmi_readb(hdmi, HDMI_EDID_FIFO_ADDR);
+
+	return 0;
+}
+
+static int inno_hdmi_i2c_write(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
+{
+	/*
+	 * The DDC module only supports reading EDID messages, so
+	 * we assume that each byte written to this i2c adapter
+	 * is an EDID word address offset.
+	 */
+	if ((msgs->len != 1) ||
+	    ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR)))
+		return -EINVAL;
+
+	reinit_completion(&hdmi->i2c->cmp);
+
+	if (msgs->addr == DDC_SEGMENT_ADDR)
+		hdmi->i2c->segment_addr = msgs->buf[0];
+	if (msgs->addr == DDC_ADDR)
+		hdmi->i2c->ddc_addr = msgs->buf[0];
+
+	/* Set edid fifo first addr */
+	hdmi_writeb(hdmi, HDMI_EDID_FIFO_OFFSET, 0x00);
+
+	/* Set edid word address 0x00/0x80 */
+	hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
+
+	/* Set edid segment pointer */
+	hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr);
+
+	return 0;
+}
+
+static int inno_hdmi_i2c_xfer(struct i2c_adapter *adap,
+			      struct i2c_msg *msgs, int num)
+{
+	struct inno_hdmi *hdmi = i2c_get_adapdata(adap);
+	struct inno_hdmi_i2c *i2c = hdmi->i2c;
+	int i, ret = 0;
+
+	mutex_lock(&i2c->lock);
+
+	/* Clear the EDID interrupt flag and unmute the interrupt */
+	hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, m_INT_EDID_READY);
+	hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
+
+	for (i = 0; i < num; i++) {
+		dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n",
+			i + 1, num, msgs[i].len, msgs[i].flags);
+
+		if (msgs[i].flags & I2C_M_RD)
+			ret = inno_hdmi_i2c_read(hdmi, &msgs[i]);
+		else
+			ret = inno_hdmi_i2c_write(hdmi, &msgs[i]);
+
+		if (ret < 0)
+			break;
+	}
+
+	if (!ret)
+		ret = num;
+
+	/* Mute HDMI EDID interrupt */
+	hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, 0);
+
+	mutex_unlock(&i2c->lock);
+
+	return ret;
+}
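
The adapter only implements the DDC EDID flow, so the only traffic it sees is the address-write/block-read pair that drm_get_edid() issues. A sketch of what such a message pair looks like by the time it reaches inno_hdmi_i2c_xfer(), built on the i2c_msg layout from <linux/i2c.h> and DDC_ADDR (0x50) from drm_edid.h (illustrative only, not code from this patch):

	u8 offset = 0;			/* EDID word address */
	u8 block[128];			/* one EDID block */
	struct i2c_msg msgs[] = {
		{	/* routed to inno_hdmi_i2c_write(): latches the offset */
			.addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &offset,
		},
		{	/* routed to inno_hdmi_i2c_read(): drains the EDID FIFO */
			.addr = DDC_ADDR, .flags = I2C_M_RD,
			.len = sizeof(block), .buf = block,
		},
	};
	/* i2c_transfer(hdmi->ddc, msgs, ARRAY_SIZE(msgs)); */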
+
+static u32 inno_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm inno_hdmi_algorithm = {
+	.master_xfer	= inno_hdmi_i2c_xfer,
+	.functionality	= inno_hdmi_i2c_func,
+};
+
+static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
+{
+	struct i2c_adapter *adap;
+	struct inno_hdmi_i2c *i2c;
+	int ret;
+
+	i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+	if (!i2c)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&i2c->lock);
+	init_completion(&i2c->cmp);
+
+	adap = &i2c->adap;
+	adap->class = I2C_CLASS_DDC;
+	adap->owner = THIS_MODULE;
+	adap->dev.parent = hdmi->dev;
+	adap->dev.of_node = hdmi->dev->of_node;
+	adap->algo = &inno_hdmi_algorithm;
+	strlcpy(adap->name, "Inno HDMI", sizeof(adap->name));
+	i2c_set_adapdata(adap, hdmi);
+
+	ret = i2c_add_adapter(adap);
+	if (ret) {
+		dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
+		devm_kfree(hdmi->dev, i2c);
+		return ERR_PTR(ret);
+	}
+
+	hdmi->i2c = i2c;
+
+	dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+	return adap;
+}
+
+static int inno_hdmi_bind(struct device *dev, struct device *master,
+				 void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm = data;
+	struct inno_hdmi *hdmi;
+	struct resource *iores;
+	int irq;
+	int ret;
+
+	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+	if (!hdmi)
+		return -ENOMEM;
+
+	hdmi->dev = dev;
+	hdmi->drm_dev = drm;
+
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -ENXIO;
+
+	hdmi->regs = devm_ioremap_resource(dev, iores);
+	if (IS_ERR(hdmi->regs))
+		return PTR_ERR(hdmi->regs);
+
+	hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
+	if (IS_ERR(hdmi->pclk)) {
+		dev_err(hdmi->dev, "Unable to get HDMI pclk clk\n");
+		return PTR_ERR(hdmi->pclk);
+	}
+
+	ret = clk_prepare_enable(hdmi->pclk);
+	if (ret) {
+		dev_err(hdmi->dev, "Cannot enable HDMI pclk clock: %d\n", ret);
+		return ret;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	inno_hdmi_reset(hdmi);
+
+	hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
+	if (IS_ERR(hdmi->ddc)) {
+		ret = PTR_ERR(hdmi->ddc);
+		hdmi->ddc = NULL;
+		return ret;
+	}
+
+	/*
+	 * Until the IP controller has been configured with accurate
+	 * video timing, the TMDS clock source is PCLK_HDMI, so we
+	 * need to initialize the TMDS rate to the PCLK rate and
+	 * reconfigure the DDC clock.
+	 */
+	hdmi->tmds_rate = clk_get_rate(hdmi->pclk);
+	inno_hdmi_i2c_init(hdmi);
+
+	ret = inno_hdmi_register(drm, hdmi);
+	if (ret)
+		return ret;
+
+	dev_set_drvdata(dev, hdmi);
+
+	/* Unmute hotplug interrupt */
+	hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
+
+	ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq,
+					inno_hdmi_irq, IRQF_SHARED,
+					dev_name(dev), hdmi);
+
+	return ret;
+}
+
+static void inno_hdmi_unbind(struct device *dev, struct device *master,
+			     void *data)
+{
+	struct inno_hdmi *hdmi = dev_get_drvdata(dev);
+
+	hdmi->connector.funcs->destroy(&hdmi->connector);
+	hdmi->encoder.funcs->destroy(&hdmi->encoder);
+
+	clk_disable_unprepare(hdmi->pclk);
+	i2c_put_adapter(hdmi->ddc);
+}
+
+static const struct component_ops inno_hdmi_ops = {
+	.bind	= inno_hdmi_bind,
+	.unbind	= inno_hdmi_unbind,
+};
+
+static int inno_hdmi_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &inno_hdmi_ops);
+}
+
+static int inno_hdmi_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &inno_hdmi_ops);
+
+	return 0;
+}
+
+static const struct of_device_id inno_hdmi_dt_ids[] = {
+	{ .compatible = "rockchip,rk3036-inno-hdmi",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, inno_hdmi_dt_ids);
+
+static struct platform_driver inno_hdmi_driver = {
+	.probe  = inno_hdmi_probe,
+	.remove = inno_hdmi_remove,
+	.driver = {
+		.name = "innohdmi-rockchip",
+		.of_match_table = inno_hdmi_dt_ids,
+	},
+};
+
+module_platform_driver(inno_hdmi_driver);
+
+MODULE_AUTHOR("Zheng Yang <zhengyang@rock-chips.com>");
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip Specific INNO-HDMI Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:innohdmi-rockchip");
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
new file mode 100644
index 0000000..aa7c415
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ *    Zheng Yang <zhengyang@rock-chips.com>
+ *    Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __INNO_HDMI_H__
+#define __INNO_HDMI_H__
+
+#define DDC_SEGMENT_ADDR		0x30
+
+enum PWR_MODE {
+	NORMAL,
+	LOWER_PWR,
+};
+
+#define HDMI_SCL_RATE			(100*1000)
+#define DDC_BUS_FREQ_L			0x4b
+#define DDC_BUS_FREQ_H			0x4c
+
+#define HDMI_SYS_CTRL			0x00
+#define m_RST_ANALOG			(1 << 6)
+#define v_RST_ANALOG			(0 << 6)
+#define v_NOT_RST_ANALOG		(1 << 6)
+#define m_RST_DIGITAL			(1 << 5)
+#define v_RST_DIGITAL			(0 << 5)
+#define v_NOT_RST_DIGITAL		(1 << 5)
+#define m_REG_CLK_INV			(1 << 4)
+#define v_REG_CLK_NOT_INV		(0 << 4)
+#define v_REG_CLK_INV			(1 << 4)
+#define m_VCLK_INV			(1 << 3)
+#define v_VCLK_NOT_INV			(0 << 3)
+#define v_VCLK_INV			(1 << 3)
+#define m_REG_CLK_SOURCE		(1 << 2)
+#define v_REG_CLK_SOURCE_TMDS		(0 << 2)
+#define v_REG_CLK_SOURCE_SYS		(1 << 2)
+#define m_POWER				(1 << 1)
+#define v_PWR_ON			(0 << 1)
+#define v_PWR_OFF			(1 << 1)
+#define m_INT_POL			(1 << 0)
+#define v_INT_POL_HIGH			1
+#define v_INT_POL_LOW			0
+
+#define HDMI_VIDEO_CONTRL1		0x01
+#define m_VIDEO_INPUT_FORMAT		(7 << 1)
+#define m_DE_SOURCE			(1 << 0)
+#define v_VIDEO_INPUT_FORMAT(n)		(n << 1)
+#define v_DE_EXTERNAL			1
+#define v_DE_INTERNAL			0
+enum {
+	VIDEO_INPUT_SDR_RGB444 = 0,
+	VIDEO_INPUT_DDR_RGB444 = 5,
+	VIDEO_INPUT_DDR_YCBCR422 = 6
+};
+
+#define HDMI_VIDEO_CONTRL2		0x02
+#define m_VIDEO_OUTPUT_COLOR		(3 << 6)
+#define m_VIDEO_INPUT_BITS		(3 << 4)
+#define m_VIDEO_INPUT_CSP		(1 << 0)
+#define v_VIDEO_OUTPUT_COLOR(n)		(((n) & 0x3) << 6)
+#define v_VIDEO_INPUT_BITS(n)		(n << 4)
+#define v_VIDEO_INPUT_CSP(n)		(n << 0)
+enum {
+	VIDEO_INPUT_12BITS = 0,
+	VIDEO_INPUT_10BITS = 1,
+	VIDEO_INPUT_REVERT = 2,
+	VIDEO_INPUT_8BITS = 3,
+};
+
+#define HDMI_VIDEO_CONTRL		0x03
+#define m_VIDEO_AUTO_CSC		(1 << 7)
+#define v_VIDEO_AUTO_CSC(n)		(n << 7)
+#define m_VIDEO_C0_C2_SWAP		(1 << 0)
+#define v_VIDEO_C0_C2_SWAP(n)		(n << 0)
+enum {
+	C0_C2_CHANGE_ENABLE = 0,
+	C0_C2_CHANGE_DISABLE = 1,
+	AUTO_CSC_DISABLE = 0,
+	AUTO_CSC_ENABLE = 1,
+};
+
+#define HDMI_VIDEO_CONTRL3		0x04
+#define m_COLOR_DEPTH_NOT_INDICATED	(1 << 4)
+#define m_SOF				(1 << 3)
+#define m_COLOR_RANGE			(1 << 2)
+#define m_CSC				(1 << 0)
+#define v_COLOR_DEPTH_NOT_INDICATED(n)	((n) << 4)
+#define v_SOF_ENABLE			(0 << 3)
+#define v_SOF_DISABLE			(1 << 3)
+#define v_COLOR_RANGE_FULL		(1 << 2)
+#define v_COLOR_RANGE_LIMITED		(0 << 2)
+#define v_CSC_ENABLE			1
+#define v_CSC_DISABLE			0
+
+#define HDMI_AV_MUTE			0x05
+#define m_AVMUTE_CLEAR			(1 << 7)
+#define m_AVMUTE_ENABLE			(1 << 6)
+#define m_AUDIO_MUTE			(1 << 1)
+#define m_VIDEO_BLACK			(1 << 0)
+#define v_AVMUTE_CLEAR(n)		(n << 7)
+#define v_AVMUTE_ENABLE(n)		(n << 6)
+#define v_AUDIO_MUTE(n)			(n << 1)
+#define v_VIDEO_MUTE(n)			(n << 0)
+
+#define HDMI_VIDEO_TIMING_CTL		0x08
+#define v_HSYNC_POLARITY(n)		(n << 3)
+#define v_VSYNC_POLARITY(n)		(n << 2)
+#define v_INETLACE(n)			(n << 1)
+#define v_EXTERANL_VIDEO(n)		(n << 0)
+
+#define HDMI_VIDEO_EXT_HTOTAL_L		0x09
+#define HDMI_VIDEO_EXT_HTOTAL_H		0x0a
+#define HDMI_VIDEO_EXT_HBLANK_L		0x0b
+#define HDMI_VIDEO_EXT_HBLANK_H		0x0c
+#define HDMI_VIDEO_EXT_HDELAY_L		0x0d
+#define HDMI_VIDEO_EXT_HDELAY_H		0x0e
+#define HDMI_VIDEO_EXT_HDURATION_L	0x0f
+#define HDMI_VIDEO_EXT_HDURATION_H	0x10
+#define HDMI_VIDEO_EXT_VTOTAL_L		0x11
+#define HDMI_VIDEO_EXT_VTOTAL_H		0x12
+#define HDMI_VIDEO_EXT_VBLANK		0x13
+#define HDMI_VIDEO_EXT_VDELAY		0x14
+#define HDMI_VIDEO_EXT_VDURATION	0x15
+
+#define HDMI_VIDEO_CSC_COEF		0x18
+
+#define HDMI_AUDIO_CTRL1		0x35
+enum {
+	CTS_SOURCE_INTERNAL = 0,
+	CTS_SOURCE_EXTERNAL = 1,
+};
+#define v_CTS_SOURCE(n)			(n << 7)
+
+enum {
+	DOWNSAMPLE_DISABLE = 0,
+	DOWNSAMPLE_1_2 = 1,
+	DOWNSAMPLE_1_4 = 2,
+};
+#define v_DOWN_SAMPLE(n)		(n << 5)
+
+enum {
+	AUDIO_SOURCE_IIS = 0,
+	AUDIO_SOURCE_SPDIF = 1,
+};
+#define v_AUDIO_SOURCE(n)		(n << 3)
+
+#define v_MCLK_ENABLE(n)		(n << 2)
+enum {
+	MCLK_128FS = 0,
+	MCLK_256FS = 1,
+	MCLK_384FS = 2,
+	MCLK_512FS = 3,
+};
+#define v_MCLK_RATIO(n)			(n)
+
+#define AUDIO_SAMPLE_RATE		0x37
+enum {
+	AUDIO_32K = 0x3,
+	AUDIO_441K = 0x0,
+	AUDIO_48K = 0x2,
+	AUDIO_882K = 0x8,
+	AUDIO_96K = 0xa,
+	AUDIO_1764K = 0xc,
+	AUDIO_192K = 0xe,
+};
+
+#define AUDIO_I2S_MODE			0x38
+enum {
+	I2S_CHANNEL_1_2 = 1,
+	I2S_CHANNEL_3_4 = 3,
+	I2S_CHANNEL_5_6 = 7,
+	I2S_CHANNEL_7_8 = 0xf
+};
+#define v_I2S_CHANNEL(n)		((n) << 2)
+enum {
+	I2S_STANDARD = 0,
+	I2S_LEFT_JUSTIFIED = 1,
+	I2S_RIGHT_JUSTIFIED = 2,
+};
+#define v_I2S_MODE(n)			(n)
+
+#define AUDIO_I2S_MAP			0x39
+#define AUDIO_I2S_SWAPS_SPDIF		0x3a
+#define v_SPIDF_FREQ(n)			(n)
+
+#define N_32K				0x1000
+#define N_441K				0x1880
+#define N_882K				0x3100
+#define N_1764K				0x6200
+#define N_48K				0x1800
+#define N_96K				0x3000
+#define N_192K				0x6000
+
+#define HDMI_AUDIO_CHANNEL_STATUS	0x3e
+#define m_AUDIO_STATUS_NLPCM		(1 << 7)
+#define m_AUDIO_STATUS_USE		(1 << 6)
+#define m_AUDIO_STATUS_COPYRIGHT	(1 << 5)
+#define m_AUDIO_STATUS_ADDITION		(3 << 2)
+#define m_AUDIO_STATUS_CLK_ACCURACY	(2 << 0)
+#define v_AUDIO_STATUS_NLPCM(n)		((n & 1) << 7)
+#define AUDIO_N_H			0x3f
+#define AUDIO_N_M			0x40
+#define AUDIO_N_L			0x41
+
+#define HDMI_AUDIO_CTS_H		0x45
+#define HDMI_AUDIO_CTS_M		0x46
+#define HDMI_AUDIO_CTS_L		0x47
+
+#define HDMI_DDC_CLK_L			0x4b
+#define HDMI_DDC_CLK_H			0x4c
+
+#define HDMI_EDID_SEGMENT_POINTER	0x4d
+#define HDMI_EDID_WORD_ADDR		0x4e
+#define HDMI_EDID_FIFO_OFFSET		0x4f
+#define HDMI_EDID_FIFO_ADDR		0x50
+
+#define HDMI_PACKET_SEND_MANUAL		0x9c
+#define HDMI_PACKET_SEND_AUTO		0x9d
+#define m_PACKET_GCP_EN			(1 << 7)
+#define m_PACKET_MSI_EN			(1 << 6)
+#define m_PACKET_SDI_EN			(1 << 5)
+#define m_PACKET_VSI_EN			(1 << 4)
+#define v_PACKET_GCP_EN(n)		((n & 1) << 7)
+#define v_PACKET_MSI_EN(n)		((n & 1) << 6)
+#define v_PACKET_SDI_EN(n)		((n & 1) << 5)
+#define v_PACKET_VSI_EN(n)		((n & 1) << 4)
+
+#define HDMI_CONTROL_PACKET_BUF_INDEX	0x9f
+enum {
+	INFOFRAME_VSI = 0x05,
+	INFOFRAME_AVI = 0x06,
+	INFOFRAME_AAI = 0x08,
+};
+
+#define HDMI_CONTROL_PACKET_ADDR	0xa0
+#define HDMI_MAXIMUM_INFO_FRAME_SIZE	0x11
+enum {
+	AVI_COLOR_MODE_RGB = 0,
+	AVI_COLOR_MODE_YCBCR422 = 1,
+	AVI_COLOR_MODE_YCBCR444 = 2,
+	AVI_COLORIMETRY_NO_DATA = 0,
+
+	AVI_COLORIMETRY_SMPTE_170M = 1,
+	AVI_COLORIMETRY_ITU709 = 2,
+	AVI_COLORIMETRY_EXTENDED = 3,
+
+	AVI_CODED_FRAME_ASPECT_NO_DATA = 0,
+	AVI_CODED_FRAME_ASPECT_4_3 = 1,
+	AVI_CODED_FRAME_ASPECT_16_9 = 2,
+
+	ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08,
+	ACTIVE_ASPECT_RATE_4_3 = 0x09,
+	ACTIVE_ASPECT_RATE_16_9 = 0x0A,
+	ACTIVE_ASPECT_RATE_14_9 = 0x0B,
+};
+
+#define HDMI_HDCP_CTRL			0x52
+#define m_HDMI_DVI			(1 << 1)
+#define v_HDMI_DVI(n)			(n << 1)
+
+#define HDMI_INTERRUPT_MASK1		0xc0
+#define HDMI_INTERRUPT_STATUS1		0xc1
+#define	m_INT_ACTIVE_VSYNC		(1 << 5)
+#define m_INT_EDID_READY		(1 << 2)
+
+#define HDMI_INTERRUPT_MASK2		0xc2
+#define HDMI_INTERRUPT_STATUS2		0xc3
+#define m_INT_HDCP_ERR			(1 << 7)
+#define m_INT_BKSV_FLAG			(1 << 6)
+#define m_INT_HDCP_OK			(1 << 4)
+
+#define HDMI_STATUS			0xc8
+#define m_HOTPLUG			(1 << 7)
+#define m_MASK_INT_HOTPLUG		(1 << 5)
+#define m_INT_HOTPLUG			(1 << 1)
+#define v_MASK_INT_HOTPLUG(n)		((n & 0x1) << 5)
+
+#define HDMI_COLORBAR                   0xc9
+
+#define HDMI_PHY_SYNC			0xce
+#define HDMI_PHY_SYS_CTL		0xe0
+#define m_TMDS_CLK_SOURCE		(1 << 5)
+#define v_TMDS_FROM_PLL			(0 << 5)
+#define v_TMDS_FROM_GEN			(1 << 5)
+#define m_PHASE_CLK			(1 << 4)
+#define v_DEFAULT_PHASE			(0 << 4)
+#define v_SYNC_PHASE			(1 << 4)
+#define m_TMDS_CURRENT_PWR		(1 << 3)
+#define v_TURN_ON_CURRENT		(0 << 3)
+#define v_CAT_OFF_CURRENT		(1 << 3)
+#define m_BANDGAP_PWR			(1 << 2)
+#define v_BANDGAP_PWR_UP		(0 << 2)
+#define v_BANDGAP_PWR_DOWN		(1 << 2)
+#define m_PLL_PWR			(1 << 1)
+#define v_PLL_PWR_UP			(0 << 1)
+#define v_PLL_PWR_DOWN			(1 << 1)
+#define m_TMDS_CHG_PWR			(1 << 0)
+#define v_TMDS_CHG_PWR_UP		(0 << 0)
+#define v_TMDS_CHG_PWR_DOWN		(1 << 0)
+
+#define HDMI_PHY_CHG_PWR		0xe1
+#define v_CLK_CHG_PWR(n)		((n & 1) << 3)
+#define v_DATA_CHG_PWR(n)		((n & 7) << 0)
+
+#define HDMI_PHY_DRIVER			0xe2
+#define v_CLK_MAIN_DRIVER(n)		(n << 4)
+#define v_DATA_MAIN_DRIVER(n)		(n << 0)
+
+#define HDMI_PHY_PRE_EMPHASIS		0xe3
+#define v_PRE_EMPHASIS(n)		((n & 7) << 4)
+#define v_CLK_PRE_DRIVER(n)		((n & 3) << 2)
+#define v_DATA_PRE_DRIVER(n)		((n & 3) << 0)
+
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW		0xe7
+#define v_FEEDBACK_DIV_LOW(n)			(n & 0xff)
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH	0xe8
+#define v_FEEDBACK_DIV_HIGH(n)			(n & 1)
+
+#define HDMI_PHY_PRE_DIV_RATIO		0xed
+#define v_PRE_DIV_RATIO(n)		(n & 0x1f)
+
+#define HDMI_CEC_CTRL			0xd0
+#define m_ADJUST_FOR_HISENSE		(1 << 6)
+#define m_REJECT_RX_BROADCAST		(1 << 5)
+#define m_BUSFREETIME_ENABLE		(1 << 2)
+#define m_REJECT_RX			(1 << 1)
+#define m_START_TX			(1 << 0)
+
+#define HDMI_CEC_DATA			0xd1
+#define HDMI_CEC_TX_OFFSET		0xd2
+#define HDMI_CEC_RX_OFFSET		0xd3
+#define HDMI_CEC_CLK_H			0xd4
+#define HDMI_CEC_CLK_L			0xd5
+#define HDMI_CEC_TX_LENGTH		0xd6
+#define HDMI_CEC_RX_LENGTH		0xd7
+#define HDMI_CEC_TX_INT_MASK		0xd8
+#define m_TX_DONE			(1 << 3)
+#define m_TX_NOACK			(1 << 2)
+#define m_TX_BROADCAST_REJ		(1 << 1)
+#define m_TX_BUSNOTFREE			(1 << 0)
+
+#define HDMI_CEC_RX_INT_MASK		0xd9
+#define m_RX_LA_ERR			(1 << 4)
+#define m_RX_GLITCH			(1 << 3)
+#define m_RX_DONE			(1 << 0)
+
+#define HDMI_CEC_TX_INT			0xda
+#define HDMI_CEC_RX_INT			0xdb
+#define HDMI_CEC_BUSFREETIME_L		0xdc
+#define HDMI_CEC_BUSFREETIME_H		0xdd
+#define HDMI_CEC_LOGICADDR		0xde
+
+#endif /* __INNO_HDMI_H__ */
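
Throughout this header, m_* names are field masks and v_*(n) shifts a value into the field, so the two compose naturally with a read-modify-write accessor. A minimal sketch of the hdmi_modb() helper that the .c file calls, assuming this is its shape (the actual definition lives in inno_hdmi.c):

static void hdmi_modb(struct inno_hdmi *hdmi, u16 offset, u32 msk, u32 val)
{
	u8 temp = hdmi_readb(hdmi, offset) & ~msk;

	temp |= val & msk;
	hdmi_writeb(hdmi, offset, temp);
}

/* e.g. hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1)); */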
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a0d51cc..896da09 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -384,36 +384,6 @@
 				rockchip_drm_sys_resume)
 };
 
-/*
- * @node: device tree node containing encoder input ports
- * @encoder: drm_encoder
- */
-int rockchip_drm_encoder_get_mux_id(struct device_node *node,
-				    struct drm_encoder *encoder)
-{
-	struct device_node *ep;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct of_endpoint endpoint;
-	struct device_node *port;
-	int ret;
-
-	if (!node || !crtc)
-		return -EINVAL;
-
-	for_each_endpoint_of_node(node, ep) {
-		port = of_graph_get_remote_port(ep);
-		of_node_put(port);
-		if (port == crtc->port) {
-			ret = of_graph_parse_endpoint(ep, &endpoint);
-			of_node_put(ep);
-			return ret ?: endpoint.id;
-		}
-	}
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(rockchip_drm_encoder_get_mux_id);
-
 static int compare_of(struct device *dev, void *data)
 {
 	struct device_node *np = data;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index bb8b076..3529f69 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -67,8 +67,6 @@
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
 				 const struct rockchip_crtc_funcs *crtc_funcs);
 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
-int rockchip_drm_encoder_get_mux_id(struct device_node *node,
-				    struct drm_encoder *encoder);
 int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
 				  int out_mode);
 int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index db07637..88643ab 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -359,13 +359,6 @@
 	scrtc->dpms = mode;
 }
 
-static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-				      const struct drm_display_mode *mode,
-				      struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
 {
 	shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -431,33 +424,12 @@
 
 static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
 	.dpms = shmob_drm_crtc_dpms,
-	.mode_fixup = shmob_drm_crtc_mode_fixup,
 	.prepare = shmob_drm_crtc_mode_prepare,
 	.commit = shmob_drm_crtc_mode_commit,
 	.mode_set = shmob_drm_crtc_mode_set,
 	.mode_set_base = shmob_drm_crtc_mode_set_base,
 };
 
-void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
-				     struct drm_file *file)
-{
-	struct drm_pending_vblank_event *event;
-	struct drm_device *dev = scrtc->crtc.dev;
-	unsigned long flags;
-
-	/* Destroy the pending vertical blanking event associated with the
-	 * pending page flip, if any, and disable vertical blanking interrupts.
-	 */
-	spin_lock_irqsave(&dev->event_lock, flags);
-	event = scrtc->event;
-	if (event && event->base.file_priv == file) {
-		scrtc->event = NULL;
-		event->base.destroy(&event->base);
-		drm_vblank_put(dev, 0);
-	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
 {
 	struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
index eddad6d..38ed4ff 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -47,8 +47,6 @@
 
 int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
 void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable);
-void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
-				     struct drm_file *file);
 void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
 void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
 void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 04e66e3..7700ff1 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -200,13 +200,6 @@
 	return ret;
 }
 
-static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct shmob_drm_device *sdev = dev->dev_private;
-
-	shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file);
-}
-
 static irqreturn_t shmob_drm_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -266,7 +259,6 @@
 				| DRIVER_PRIME,
 	.load			= shmob_drm_load,
 	.unload			= shmob_drm_unload,
-	.preclose		= shmob_drm_preclose,
 	.set_busid		= drm_platform_set_busid,
 	.irq_handler		= shmob_drm_irq,
 	.get_vblank_counter	= drm_vblank_no_hw_counter,
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index 00d0698..a516eb8 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -7,6 +7,7 @@
 #include "sti_awg_utils.h"
 
 #define AWG_OPCODE_OFFSET 10
+#define AWG_MAX_ARG       0x3ff
 
 enum opcode {
 	SET,
@@ -34,6 +35,8 @@
 	/* skip, repeat and replay args must not exceed 1023.
 	 * If the user wants to exceed this value, the instruction must be
 	 * duplicated and the arg adjusted for each duplicated instruction.
+	 *
+	 * mux_sel is used in case of SAV/EAV synchronization.
 	 */
 
 	while (arg_tmp > 0) {
@@ -65,7 +68,7 @@
 
 			mux = 0;
 			data_enable = 0;
-			arg &= (0x3ff);
+			arg &= AWG_MAX_ARG;
 			break;
 		case REPEAT:
 		case REPLAY:
@@ -76,13 +79,13 @@
 
 			mux = 0;
 			data_enable = 0;
-			arg &= (0x3ff);
+			arg &= AWG_MAX_ARG;
 			break;
 		case JUMP:
 			mux = 0;
 			data_enable = 0;
 			arg |= 0x40; /* for jump instruction 7th bit is 1 */
-			arg &= 0x3ff;
+			arg &= AWG_MAX_ARG;
 			break;
 		case STOP:
 			arg = 0;
@@ -110,68 +113,75 @@
 	return 0;
 }
 
-int sti_awg_generate_code_data_enable_mode(
+static int awg_generate_line_signal(
 		struct awg_code_generation_params *fwparams,
 		struct awg_timing *timing)
 {
 	long int val;
-	long int data_en;
+	int ret = 0;
+
+	if (timing->trailing_pixels > 0) {
+		/* skip the trailing pixels */
+		val = timing->blanking_level;
+		ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
+
+		val = timing->trailing_pixels - 1;
+		ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
+	}
+
+	/* set DE signal high */
+	val = timing->blanking_level;
+	ret |= awg_generate_instr((timing->trailing_pixels > 0) ? SET : RPLSET,
+			val, 0, 1, fwparams);
+
+	if (timing->blanking_pixels > 0) {
+		/* skip the number of active pixels */
+		val = timing->active_pixels - 1;
+		ret |= awg_generate_instr(SKIP, val, 0, 1, fwparams);
+
+		/* set DE signal low */
+		val = timing->blanking_level;
+		ret |= awg_generate_instr(SET, val, 0, 0, fwparams);
+	}
+
+	return ret;
+}
+
+int sti_awg_generate_code_data_enable_mode(
+		struct awg_code_generation_params *fwparams,
+		struct awg_timing *timing)
+{
+	long int val, tmp_val;
 	int ret = 0;
 
 	if (timing->trailing_lines > 0) {
 		/* skip trailing lines */
 		val = timing->blanking_level;
-		data_en = 0;
-		ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+		ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
 
 		val = timing->trailing_lines - 1;
-		data_en = 0;
-		ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+		ret |= awg_generate_instr(REPLAY, val, 0, 0, fwparams);
 	}
 
-	if (timing->trailing_pixels > 0) {
-		/* skip trailing pixel */
-		val = timing->blanking_level;
-		data_en = 0;
-		ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+	tmp_val = timing->active_lines - 1;
 
-		val = timing->trailing_pixels - 1;
-		data_en = 0;
-		ret |= awg_generate_instr(SKIP, val, 0, data_en, fwparams);
+	while (tmp_val > 0) {
+		/* generate DE signal for each line */
+		ret |= awg_generate_line_signal(fwparams, timing);
+		/* replay the sequence for as many active lines as defined */
+		ret |= awg_generate_instr(REPLAY,
+					  min_t(int, AWG_MAX_ARG, tmp_val),
+					  0, 0, fwparams);
+		tmp_val -= AWG_MAX_ARG;
 	}
 
-	/* set DE signal high */
-	val = timing->blanking_level;
-	data_en = 1;
-	ret |= awg_generate_instr((timing->trailing_pixels > 0) ? SET : RPLSET,
-			val, 0, data_en, fwparams);
-
-	if (timing->blanking_pixels > 0) {
-		/* skip the number of active pixel */
-		val = timing->active_pixels - 1;
-		data_en = 1;
-		ret |= awg_generate_instr(SKIP, val, 0, data_en, fwparams);
-
-		/* set DE signal low */
-		val = timing->blanking_level;
-		data_en = 0;
-		ret |= awg_generate_instr(SET, val, 0, data_en, fwparams);
-	}
-
-	/* replay the sequence as many active lines defined */
-	val = timing->active_lines - 1;
-	data_en = 0;
-	ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
-
 	if (timing->blanking_lines > 0) {
 		/* skip blanking lines */
 		val = timing->blanking_level;
-		data_en = 0;
-		ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+		ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
 
 		val = timing->blanking_lines - 1;
-		data_en = 0;
-		ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+		ret |= awg_generate_instr(REPLAY, val, 0, 0, fwparams);
 	}
 
 	return ret;
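
Because a REPLAY argument is capped at AWG_MAX_ARG (1023), the loop above re-emits the per-line sequence once per chunk of at most AWG_MAX_ARG replays. Worked through for an illustrative 2160-line mode:

	/* active_lines = 2160 -> tmp_val starts at 2159:
	 *   pass 1: REPLAY min(1023, 2159) = 1023, tmp_val -> 1136
	 *   pass 2: REPLAY min(1023, 1136) = 1023, tmp_val ->  113
	 *   pass 3: REPLAY min(1023,  113) =  113, tmp_val -> -910 (loop exits)
	 */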
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index afed217..3d2fa3a 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -75,13 +75,13 @@
 		switch (desc[i].type) {
 		case STI_VID_SUBDEV:
 			compo->vid[vid_id++] =
-			    sti_vid_create(compo->dev, desc[i].id,
+			    sti_vid_create(compo->dev, drm_dev, desc[i].id,
 					   compo->regs + desc[i].offset);
 			break;
 		case STI_MIXER_MAIN_SUBDEV:
 		case STI_MIXER_AUX_SUBDEV:
 			compo->mixer[mixer_id++] =
-			    sti_mixer_create(compo->dev, desc[i].id,
+			    sti_mixer_create(compo->dev, drm_dev, desc[i].id,
 					     compo->regs + desc[i].offset);
 			break;
 		case STI_GPD_SUBDEV:
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index de11c7c..505620c 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -56,6 +56,7 @@
 				struct drm_display_mode *adjusted_mode)
 {
 	/* accept the provided drm_display_mode, do not fix it up */
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
 	return true;
 }
 
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index bd736ac..3abb400 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -5,12 +5,10 @@
  *          for STMicroelectronics.
  * License terms:  GNU General Public License (GPL), version 2
  */
-#include <drm/drmP.h>
 
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
 
 #include "sti_compositor.h"
 #include "sti_cursor.h"
@@ -74,6 +72,82 @@
 
 #define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
 
+#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+				   readl(cursor->regs + reg))
+
+static void cursor_dbg_vpo(struct seq_file *s, u32 val)
+{
+	seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void cursor_dbg_size(struct seq_file *s, u32 val)
+{
+	seq_printf(s, "\t%d x %d", val & 0x07FF, (val >> 16) & 0x07FF);
+}
+
+static void cursor_dbg_pml(struct seq_file *s,
+			   struct sti_cursor *cursor, u32 val)
+{
+	if (cursor->pixmap.paddr == val)
+		seq_printf(s, "\tVirt @: %p", cursor->pixmap.base);
+}
+
+static void cursor_dbg_cml(struct seq_file *s,
+			   struct sti_cursor *cursor, u32 val)
+{
+	if (cursor->clut_paddr == val)
+		seq_printf(s, "\tVirt @: %p", cursor->clut);
+}
+
+static int cursor_dbg_show(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "%s: (vaddr = 0x%p)",
+		   sti_plane_to_str(&cursor->plane), cursor->regs);
+
+	DBGFS_DUMP(CUR_CTL);
+	DBGFS_DUMP(CUR_VPO);
+	cursor_dbg_vpo(s, readl(cursor->regs + CUR_VPO));
+	DBGFS_DUMP(CUR_PML);
+	cursor_dbg_pml(s, cursor, readl(cursor->regs + CUR_PML));
+	DBGFS_DUMP(CUR_PMP);
+	DBGFS_DUMP(CUR_SIZE);
+	cursor_dbg_size(s, readl(cursor->regs + CUR_SIZE));
+	DBGFS_DUMP(CUR_CML);
+	cursor_dbg_cml(s, cursor, readl(cursor->regs + CUR_CML));
+	DBGFS_DUMP(CUR_AWS);
+	DBGFS_DUMP(CUR_AWE);
+	seq_puts(s, "\n");
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list cursor_debugfs_files[] = {
+	{ "cursor", cursor_dbg_show, 0, NULL },
+};
+
+static int cursor_debugfs_init(struct sti_cursor *cursor,
+			       struct drm_minor *minor)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
+		cursor_debugfs_files[i].data = cursor;
+
+	return drm_debugfs_create_files(cursor_debugfs_files,
+					ARRAY_SIZE(cursor_debugfs_files),
+					minor->debugfs_root, minor);
+}
+
 static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
 {
 	u8  *dst = cursor->pixmap.base;
@@ -110,35 +184,31 @@
 						  (b * 5);
 }
 
-static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
-				     struct drm_plane_state *oldstate)
+static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
+				   struct drm_plane_state *state)
 {
-	struct drm_plane_state *state = drm_plane->state;
 	struct sti_plane *plane = to_sti_plane(drm_plane);
 	struct sti_cursor *cursor = to_sti_cursor(plane);
 	struct drm_crtc *crtc = state->crtc;
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
 	struct drm_framebuffer *fb = state->fb;
-	struct drm_display_mode *mode = &crtc->mode;
-	int dst_x = state->crtc_x;
-	int dst_y = state->crtc_y;
-	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	struct drm_crtc_state *crtc_state;
+	struct drm_display_mode *mode;
+	int dst_x, dst_y, dst_w, dst_h;
+	int src_w, src_h;
+
+	/* no need for further checks if the plane is being disabled */
+	if (!crtc || !fb)
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+	mode = &crtc_state->mode;
+	dst_x = state->crtc_x;
+	dst_y = state->crtc_y;
+	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
 	/* src_x are in 16.16 format */
-	int src_w = state->src_w >> 16;
-	int src_h = state->src_h >> 16;
-	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
-	struct drm_gem_cma_object *cma_obj;
-	u32 y, x;
-	u32 val;
-
-	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-		      crtc->base.id, sti_mixer_to_str(mixer),
-		      drm_plane->base.id, sti_plane_to_str(plane));
-	DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
-
-	dev_dbg(cursor->dev, "%s %s\n", __func__,
-		sti_plane_to_str(plane));
+	src_w = state->src_w >> 16;
+	src_h = state->src_h >> 16;
 
 	if (src_w < STI_CURS_MIN_SIZE ||
 	    src_h < STI_CURS_MIN_SIZE ||
@@ -146,7 +216,7 @@
 	    src_h > STI_CURS_MAX_SIZE) {
 		DRM_ERROR("Invalid cursor size (%dx%d)\n",
 				src_w, src_h);
-		return;
+		return -EINVAL;
 	}
 
 	/* If the cursor size has changed, re-allocate the pixmap */
@@ -168,16 +238,46 @@
 						   GFP_KERNEL | GFP_DMA);
 		if (!cursor->pixmap.base) {
 			DRM_ERROR("Failed to allocate memory for pixmap\n");
-			return;
+			return -EINVAL;
 		}
 	}
 
-	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	if (!cma_obj) {
+	if (!drm_fb_cma_get_gem_obj(fb, 0)) {
 		DRM_ERROR("Can't get CMA GEM object for fb\n");
-		return;
+		return -EINVAL;
 	}
 
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
+
+	return 0;
+}
+
+static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+				     struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_cursor *cursor = to_sti_cursor(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode;
+	int dst_x, dst_y;
+	struct drm_gem_cma_object *cma_obj;
+	u32 y, x;
+	u32 val;
+
+	if (!crtc || !fb)
+		return;
+
+	mode = &crtc->mode;
+	dst_x = state->crtc_x;
+	dst_y = state->crtc_y;
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+
 	/* Convert ARGB8888 to CLUT8 */
 	sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
 
@@ -191,21 +291,21 @@
 	val = y << 16 | x;
 	writel(val, cursor->regs + CUR_AWE);
 
-	if (first_prepare) {
-		/* Set and fetch CLUT */
-		writel(cursor->clut_paddr, cursor->regs + CUR_CML);
-		writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
-	}
-
 	/* Set memory location, size, and position */
 	writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
 	writel(cursor->width, cursor->regs + CUR_PMP);
 	writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
 
 	y = sti_vtg_get_line_number(*mode, dst_y);
-	x = sti_vtg_get_pixel_number(*mode, dst_y);
+	x = sti_vtg_get_pixel_number(*mode, dst_x);
 	writel((y << 16) | x, cursor->regs + CUR_VPO);
 
+	/* Set and fetch CLUT */
+	writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+	writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
+
+	sti_plane_update_fps(plane, true, false);
+
 	plane->status = STI_PLANE_UPDATED;
 }
 
@@ -213,7 +313,6 @@
 				      struct drm_plane_state *oldstate)
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
-	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
 
 	if (!drm_plane->crtc) {
 		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -222,13 +321,15 @@
 	}
 
 	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->crtc->base.id,
+			 sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
 			 drm_plane->base.id, sti_plane_to_str(plane));
 
 	plane->status = STI_PLANE_DISABLING;
 }
 
 static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+	.atomic_check = sti_cursor_atomic_check,
 	.atomic_update = sti_cursor_atomic_update,
 	.atomic_disable = sti_cursor_atomic_disable,
 };
@@ -281,6 +382,9 @@
 
 	sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
 
+	if (cursor_debugfs_init(cursor, drm_dev->primary))
+		DRM_ERROR("CURSOR debugfs setup failed\n");
+
 	return &cursor->plane.drm_plane;
 
 err_plane:
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 506b562..6bd6aba 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -20,6 +20,7 @@
 
 #include "sti_crtc.h"
 #include "sti_drv.h"
+#include "sti_plane.h"
 
 #define DRIVER_NAME	"sti"
 #define DRIVER_DESC	"STMicroelectronics SoC DRM"
@@ -30,6 +31,130 @@
 #define STI_MAX_FB_HEIGHT	4096
 #define STI_MAX_FB_WIDTH	4096
 
+static int sti_drm_fps_get(void *data, u64 *val)
+{
+	struct drm_device *drm_dev = data;
+	struct drm_plane *p;
+	unsigned int i = 0;
+
+	*val = 0;
+	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+		struct sti_plane *plane = to_sti_plane(p);
+
+		*val |= plane->fps_info.output << i;
+		i++;
+	}
+
+	return 0;
+}
+
+static int sti_drm_fps_set(void *data, u64 val)
+{
+	struct drm_device *drm_dev = data;
+	struct drm_plane *p;
+	unsigned int i = 0;
+
+	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+		struct sti_plane *plane = to_sti_plane(p);
+
+		plane->fps_info.output = (val >> i) & 1;
+		i++;
+	}
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sti_drm_fps_fops,
+			sti_drm_fps_get, sti_drm_fps_set, "%llu\n");
+
+static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_plane *p;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
+		struct sti_plane *plane = to_sti_plane(p);
+
+		seq_printf(s, "%s%s\n",
+			   plane->fps_info.fps_str,
+			   plane->fps_info.fips_str);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list sti_drm_dbg_list[] = {
+	{"fps_get", sti_drm_fps_dbg_show, 0},
+};
+
+static int sti_drm_debugfs_create(struct dentry *root,
+				  struct drm_minor *minor,
+				  const char *name,
+				  const struct file_operations *fops)
+{
+	struct drm_device *dev = minor->dev;
+	struct drm_info_node *node;
+	struct dentry *ent;
+
+	ent = debugfs_create_file(name, S_IRUGO | S_IWUSR, root, dev, fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *)fops;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
+static int sti_drm_dbg_init(struct drm_minor *minor)
+{
+	int ret;
+
+	ret = drm_debugfs_create_files(sti_drm_dbg_list,
+				       ARRAY_SIZE(sti_drm_dbg_list),
+				       minor->debugfs_root, minor);
+	if (ret)
+		goto err;
+
+	ret = sti_drm_debugfs_create(minor->debugfs_root, minor, "fps_show",
+				     &sti_drm_fps_fops);
+	if (ret)
+		goto err;
+
+	DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
+	return 0;
+err:
+	DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
+	return ret;
+}
+
+void sti_drm_dbg_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(sti_drm_dbg_list,
+				 ARRAY_SIZE(sti_drm_dbg_list), minor);
+
+	drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops,
+				 1, minor);
+}
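
sti_drm_fps_set() interprets the written value as a bitmask with one enable bit per plane, in plane_list order, and sti_drm_fps_get() reads the same mask back. A hedged usage note (the debugfs paths assume the default DRM layout and minor 0):

	/* From userspace:
	 *   echo 3 > /sys/kernel/debug/dri/0/fps_show   # enable planes 0 and 1
	 *   cat /sys/kernel/debug/dri/0/fps_get         # per-plane fps/fips strings
	 */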
+
 static void sti_atomic_schedule(struct sti_private *private,
 				struct drm_atomic_state *state)
 {
@@ -181,18 +306,9 @@
 	.release = drm_release,
 };
 
-static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
-					    struct drm_gem_object *obj,
-					    int flags)
-{
-	/* we want to be able to write in mmapped buffer */
-	flags |= O_RDWR;
-	return drm_gem_prime_export(dev, obj, flags);
-}
-
 static struct drm_driver sti_driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
-	    DRIVER_GEM | DRIVER_PRIME,
+	    DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
 	.load = sti_load,
 	.gem_free_object = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -207,7 +323,7 @@
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = sti_gem_prime_export,
+	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
 	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@@ -215,6 +331,9 @@
 	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
 	.gem_prime_mmap = drm_gem_cma_prime_mmap,
 
+	.debugfs_init = sti_drm_dbg_init,
+	.debugfs_cleanup = sti_drm_dbg_cleanup,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 45cbe2b..25f7663 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -6,6 +6,7 @@
 
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/debugfs.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
@@ -156,6 +157,69 @@
 	writel(DVO_AWG_CTRL_EN, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
 }
 
+#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+				   readl(dvo->regs + reg))
+
+static void dvo_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
+{
+	unsigned int i;
+
+	seq_puts(s, "\n\n");
+	seq_puts(s, "  DVO AWG microcode:");
+	for (i = 0; i < AWG_MAX_INST; i++) {
+		if (i % 8 == 0)
+			seq_printf(s, "\n  %04X:", i);
+		seq_printf(s, " %04X", readl(reg + i * 4));
+	}
+}
+
+static int dvo_dbg_show(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
+	DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
+	DBGFS_DUMP(DVO_DOF_CFG);
+	DBGFS_DUMP(DVO_LUT_PROG_LOW);
+	DBGFS_DUMP(DVO_LUT_PROG_MID);
+	DBGFS_DUMP(DVO_LUT_PROG_HIGH);
+	dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
+	seq_puts(s, "\n");
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list dvo_debugfs_files[] = {
+	{ "dvo", dvo_dbg_show, 0, NULL },
+};
+
+static void dvo_debugfs_exit(struct sti_dvo *dvo, struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(dvo_debugfs_files,
+				 ARRAY_SIZE(dvo_debugfs_files),
+				 minor);
+}
+
+static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
+		dvo_debugfs_files[i].data = dvo;
+
+	return drm_debugfs_create_files(dvo_debugfs_files,
+					ARRAY_SIZE(dvo_debugfs_files),
+					minor->debugfs_root, minor);
+}
+
 static void sti_dvo_disable(struct drm_bridge *bridge)
 {
 	struct sti_dvo *dvo = bridge->driver_private;
@@ -345,12 +409,14 @@
 
 	DRM_DEBUG_DRIVER("\n");
 
-	if (!dvo->panel)
+	if (!dvo->panel) {
 		dvo->panel = of_drm_find_panel(dvo->panel_node);
+		if (dvo->panel)
+			drm_panel_attach(dvo->panel, connector);
+	}
 
 	if (dvo->panel)
-		if (!drm_panel_attach(dvo->panel, connector))
-			return connector_status_connected;
+		return connector_status_connected;
 
 	return connector_status_disconnected;
 }
@@ -453,6 +519,9 @@
 		goto err_sysfs;
 	}
 
+	if (dvo_debugfs_init(dvo, drm_dev->primary))
+		DRM_ERROR("DVO debugfs setup failed\n");
+
 	return 0;
 
 err_sysfs:
@@ -467,6 +536,9 @@
 			   struct device *master, void *data)
 {
 	struct sti_dvo *dvo = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+
+	dvo_debugfs_exit(dvo, drm_dev->primary);
 
 	drm_bridge_remove(dvo->bridge);
 }
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 514551c..ff3d3e7 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -6,9 +6,7 @@
  * License terms:  GNU General Public License (GPL), version 2
  */
 
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-
+#include <drm/drm_atomic.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 
@@ -32,10 +30,23 @@
 #define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
 #define GDP_ARGB1555    0x06
 #define GDP_ARGB4444    0x07
-#define GDP_CLUT8       0x0B
-#define GDP_YCBR888     0x10
-#define GDP_YCBR422R    0x12
-#define GDP_AYCBR8888   0x15
+
+#define GDP2STR(fmt) { GDP_ ## fmt, #fmt }
+
+static struct gdp_format_to_str {
+	int format;
+	char name[20];
+} gdp_format_to_str[] = {
+		GDP2STR(RGB565),
+		GDP2STR(RGB888),
+		GDP2STR(RGB888_32),
+		GDP2STR(XBGR8888),
+		GDP2STR(ARGB8565),
+		GDP2STR(ARGB8888),
+		GDP2STR(ABGR8888),
+		GDP2STR(ARGB1555),
+		GDP2STR(ARGB4444)
+		};
 
 #define GAM_GDP_CTL_OFFSET      0x00
 #define GAM_GDP_AGC_OFFSET      0x04
@@ -97,6 +108,7 @@
  * @vtg_field_nb:       callback for VTG FIELD (top or bottom) notification
  * @is_curr_top:        true if the current node processed is the top field
  * @node_list:          array of node list
+ * @vtg:                registered vtg
  */
 struct sti_gdp {
 	struct sti_plane plane;
@@ -108,6 +120,7 @@
 	struct notifier_block vtg_field_nb;
 	bool is_curr_top;
 	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
+	struct sti_vtg *vtg;
 };
 
 #define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
@@ -121,12 +134,224 @@
 	DRM_FORMAT_ARGB1555,
 	DRM_FORMAT_RGB565,
 	DRM_FORMAT_RGB888,
-	DRM_FORMAT_AYUV,
-	DRM_FORMAT_YUV444,
-	DRM_FORMAT_VYUY,
-	DRM_FORMAT_C8,
 };
 
+#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+				   readl(gdp->regs + reg ## _OFFSET))
+
+static void gdp_dbg_ctl(struct seq_file *s, int val)
+{
+	int i;
+
+	seq_puts(s, "\tColor:");
+	for (i = 0; i < ARRAY_SIZE(gdp_format_to_str); i++) {
+		if (gdp_format_to_str[i].format == (val & 0x1F)) {
+			seq_puts(s, gdp_format_to_str[i].name);
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(gdp_format_to_str))
+		seq_puts(s, "<UNKNOWN>");
+
+	seq_printf(s, "\tWaitNextVsync:%d", val & WAIT_NEXT_VSYNC ? 1 : 0);
+}
+
+static void gdp_dbg_vpo(struct seq_file *s, int val)
+{
+	seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_vps(struct seq_file *s, int val)
+{
+	seq_printf(s, "\txds:%4d\tyds:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_size(struct seq_file *s, int val)
+{
+	seq_printf(s, "\t%d x %d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_nvn(struct seq_file *s, struct sti_gdp *gdp, int val)
+{
+	void *base = NULL;
+	unsigned int i;
+
+	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+		if (gdp->node_list[i].top_field_paddr == val) {
+			base = gdp->node_list[i].top_field;
+			break;
+		}
+		if (gdp->node_list[i].btm_field_paddr == val) {
+			base = gdp->node_list[i].btm_field;
+			break;
+		}
+	}
+
+	if (base)
+		seq_printf(s, "\tVirt @: %p", base);
+}
+
+static void gdp_dbg_ppt(struct seq_file *s, int val)
+{
+	if (val & GAM_GDP_PPT_IGNORE)
+		seq_puts(s, "\tNot displayed on mixer!");
+}
+
+static void gdp_dbg_mst(struct seq_file *s, int val)
+{
+	if (val & 1)
+		seq_puts(s, "\tBUFFER UNDERFLOW!");
+}
+
+static int gdp_dbg_show(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+	struct drm_crtc *crtc = drm_plane->crtc;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "%s: (vaddr = 0x%p)",
+		   sti_plane_to_str(&gdp->plane), gdp->regs);
+
+	DBGFS_DUMP(GAM_GDP_CTL);
+	gdp_dbg_ctl(s, readl(gdp->regs + GAM_GDP_CTL_OFFSET));
+	DBGFS_DUMP(GAM_GDP_AGC);
+	DBGFS_DUMP(GAM_GDP_VPO);
+	gdp_dbg_vpo(s, readl(gdp->regs + GAM_GDP_VPO_OFFSET));
+	DBGFS_DUMP(GAM_GDP_VPS);
+	gdp_dbg_vps(s, readl(gdp->regs + GAM_GDP_VPS_OFFSET));
+	DBGFS_DUMP(GAM_GDP_PML);
+	DBGFS_DUMP(GAM_GDP_PMP);
+	DBGFS_DUMP(GAM_GDP_SIZE);
+	gdp_dbg_size(s, readl(gdp->regs + GAM_GDP_SIZE_OFFSET));
+	DBGFS_DUMP(GAM_GDP_NVN);
+	gdp_dbg_nvn(s, gdp, readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+	DBGFS_DUMP(GAM_GDP_KEY1);
+	DBGFS_DUMP(GAM_GDP_KEY2);
+	DBGFS_DUMP(GAM_GDP_PPT);
+	gdp_dbg_ppt(s, readl(gdp->regs + GAM_GDP_PPT_OFFSET));
+	DBGFS_DUMP(GAM_GDP_CML);
+	DBGFS_DUMP(GAM_GDP_MST);
+	gdp_dbg_mst(s, readl(gdp->regs + GAM_GDP_MST_OFFSET));
+
+	seq_puts(s, "\n\n");
+	if (!crtc)
+		seq_puts(s, "  Not connected to any DRM CRTC\n");
+	else
+		seq_printf(s, "  Connected to DRM CRTC #%d (%s)\n",
+			   crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static void gdp_node_dump_node(struct seq_file *s, struct sti_gdp_node *node)
+{
+	seq_printf(s, "\t@:0x%p", node);
+	seq_printf(s, "\n\tCTL  0x%08X", node->gam_gdp_ctl);
+	gdp_dbg_ctl(s, node->gam_gdp_ctl);
+	seq_printf(s, "\n\tAGC  0x%08X", node->gam_gdp_agc);
+	seq_printf(s, "\n\tVPO  0x%08X", node->gam_gdp_vpo);
+	gdp_dbg_vpo(s, node->gam_gdp_vpo);
+	seq_printf(s, "\n\tVPS  0x%08X", node->gam_gdp_vps);
+	gdp_dbg_vps(s, node->gam_gdp_vps);
+	seq_printf(s, "\n\tPML  0x%08X", node->gam_gdp_pml);
+	seq_printf(s, "\n\tPMP  0x%08X", node->gam_gdp_pmp);
+	seq_printf(s, "\n\tSIZE 0x%08X", node->gam_gdp_size);
+	gdp_dbg_size(s, node->gam_gdp_size);
+	seq_printf(s, "\n\tNVN  0x%08X", node->gam_gdp_nvn);
+	seq_printf(s, "\n\tKEY1 0x%08X", node->gam_gdp_key1);
+	seq_printf(s, "\n\tKEY2 0x%08X", node->gam_gdp_key2);
+	seq_printf(s, "\n\tPPT  0x%08X", node->gam_gdp_ppt);
+	gdp_dbg_ppt(s, node->gam_gdp_ppt);
+	seq_printf(s, "\n\tCML  0x%08X", node->gam_gdp_cml);
+	seq_puts(s, "\n");
+}
+
+static int gdp_node_dbg_show(struct seq_file *s, void *arg)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	unsigned int b;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	for (b = 0; b < GDP_NODE_NB_BANK; b++) {
+		seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
+		gdp_node_dump_node(s, gdp->node_list[b].top_field);
+		seq_printf(s, "\n%s[%d].btm", sti_plane_to_str(&gdp->plane), b);
+		gdp_node_dump_node(s, gdp->node_list[b].btm_field);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list gdp0_debugfs_files[] = {
+	{ "gdp0", gdp_dbg_show, 0, NULL },
+	{ "gdp0_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp1_debugfs_files[] = {
+	{ "gdp1", gdp_dbg_show, 0, NULL },
+	{ "gdp1_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp2_debugfs_files[] = {
+	{ "gdp2", gdp_dbg_show, 0, NULL },
+	{ "gdp2_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp3_debugfs_files[] = {
+	{ "gdp3", gdp_dbg_show, 0, NULL },
+	{ "gdp3_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
+{
+	unsigned int i;
+	struct drm_info_list *gdp_debugfs_files;
+	int nb_files;
+
+	switch (gdp->plane.desc) {
+	case STI_GDP_0:
+		gdp_debugfs_files = gdp0_debugfs_files;
+		nb_files = ARRAY_SIZE(gdp0_debugfs_files);
+		break;
+	case STI_GDP_1:
+		gdp_debugfs_files = gdp1_debugfs_files;
+		nb_files = ARRAY_SIZE(gdp1_debugfs_files);
+		break;
+	case STI_GDP_2:
+		gdp_debugfs_files = gdp2_debugfs_files;
+		nb_files = ARRAY_SIZE(gdp2_debugfs_files);
+		break;
+	case STI_GDP_3:
+		gdp_debugfs_files = gdp3_debugfs_files;
+		nb_files = ARRAY_SIZE(gdp3_debugfs_files);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (i = 0; i < nb_files; i++)
+		gdp_debugfs_files[i].data = gdp;
+
+	return drm_debugfs_create_files(gdp_debugfs_files,
+					nb_files,
+					minor->debugfs_root, minor);
+}
+
 static int sti_gdp_fourcc2format(int fourcc)
 {
 	switch (fourcc) {
@@ -146,14 +371,6 @@
 		return GDP_RGB565;
 	case DRM_FORMAT_RGB888:
 		return GDP_RGB888;
-	case DRM_FORMAT_AYUV:
-		return GDP_AYCBR8888;
-	case DRM_FORMAT_YUV444:
-		return GDP_YCBR888;
-	case DRM_FORMAT_VYUY:
-		return GDP_YCBR422R;
-	case DRM_FORMAT_C8:
-		return GDP_CLUT8;
 	}
 	return -1;
 }
@@ -163,7 +380,6 @@
 	switch (format) {
 	case GDP_ARGB8565:
 	case GDP_ARGB8888:
-	case GDP_AYCBR8888:
 	case GDP_ABGR8888:
 		return GAM_GDP_ALPHARANGE_255;
 	}
@@ -240,9 +456,6 @@
  */
 static void sti_gdp_disable(struct sti_gdp *gdp)
 {
-	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
-	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
-	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
 	unsigned int i;
 
 	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
@@ -253,8 +466,7 @@
 		gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
 	}
 
-	if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
+	if (sti_vtg_unregister_client(gdp->vtg, &gdp->vtg_field_nb))
 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
 	if (gdp->clk_pix)
@@ -379,37 +591,54 @@
 	}
 }
 
-static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
-				  struct drm_plane_state *oldstate)
+/**
+ * sti_gdp_get_dst
+ * @dev: device
+ * @dst: requested destination size
+ * @src: source size
+ *
+ * Return the cropped / clamped destination size
+ *
+ * RETURNS:
+ * cropped / clamped destination size
+ */
+static int sti_gdp_get_dst(struct device *dev, int dst, int src)
 {
-	struct drm_plane_state *state = drm_plane->state;
+	if (dst == src)
+		return dst;
+
+	if (dst < src) {
+		dev_dbg(dev, "WARNING: GDP scale not supported, will crop\n");
+		return dst;
+	}
+
+	dev_dbg(dev, "WARNING: GDP scale not supported, will clamp\n");
+	return src;
+}
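
Since the GDP block has no scaler, sti_gdp_get_dst() above reduces to min(dst, src): a smaller destination crops the source, a larger one is clamped down to it. A minimal standalone sketch of the same rule, with the dev_dbg() logging left out:

    #include <stdio.h>

    /* Same crop/clamp rule as sti_gdp_get_dst(): the GDP cannot scale, so
     * the output window shrinks to whichever of the requested destination
     * or the source is smaller. */
    static int gdp_get_dst(int dst, int src)
    {
            return dst < src ? dst : src;
    }

    int main(void)
    {
            printf("%d\n", gdp_get_dst(1280, 1920)); /* 1280: crop the source */
            printf("%d\n", gdp_get_dst(1920, 1280)); /* 1280: clamp to source */
            printf("%d\n", gdp_get_dst(1920, 1920)); /* 1920: exact fit */
            return 0;
    }
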
+
+static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
+				struct drm_plane_state *state)
+{
 	struct sti_plane *plane = to_sti_plane(drm_plane);
 	struct sti_gdp *gdp = to_sti_gdp(plane);
 	struct drm_crtc *crtc = state->crtc;
 	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
 	struct drm_framebuffer *fb =  state->fb;
 	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+	struct drm_crtc_state *crtc_state;
 	struct sti_mixer *mixer;
 	struct drm_display_mode *mode;
 	int dst_x, dst_y, dst_w, dst_h;
 	int src_x, src_y, src_w, src_h;
-	struct drm_gem_cma_object *cma_obj;
-	struct sti_gdp_node_list *list;
-	struct sti_gdp_node_list *curr_list;
-	struct sti_gdp_node *top_field, *btm_field;
-	u32 dma_updated_top;
-	u32 dma_updated_btm;
 	int format;
-	unsigned int depth, bpp;
-	u32 ydo, xdo, yds, xds;
-	int res;
 
-	/* Manage the case where crtc is null (disabled) */
-	if (!crtc)
-		return;
+	/* no need for further checks if the plane is being disabled */
+	if (!crtc || !fb)
+		return 0;
 
 	mixer = to_sti_mixer(crtc);
-	mode = &crtc->mode;
+	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+	mode = &crtc_state->mode;
 	dst_x = state->crtc_x;
 	dst_y = state->crtc_y;
 	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
@@ -417,92 +646,41 @@
 	/* src_x are in 16.16 format */
 	src_x = state->src_x >> 16;
 	src_y = state->src_y >> 16;
-	src_w = state->src_w >> 16;
-	src_h = state->src_h >> 16;
+	src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+	src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
 
-	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-		      crtc->base.id, sti_mixer_to_str(mixer),
-		      drm_plane->base.id, sti_plane_to_str(plane));
-	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
-		      sti_plane_to_str(plane),
-		      dst_w, dst_h, dst_x, dst_y,
-		      src_w, src_h, src_x, src_y);
-
-	list = sti_gdp_get_free_nodes(gdp);
-	top_field = list->top_field;
-	btm_field = list->btm_field;
-
-	dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
-		sti_plane_to_str(plane), top_field, btm_field);
-
-	/* build the top field */
-	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
-	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
 	format = sti_gdp_fourcc2format(fb->pixel_format);
 	if (format == -1) {
 		DRM_ERROR("Format not supported by GDP %.4s\n",
 			  (char *)&fb->pixel_format);
-		return;
+		return -EINVAL;
 	}
-	top_field->gam_gdp_ctl |= format;
-	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
-	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
 
-	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	if (!cma_obj) {
+	if (!drm_fb_cma_get_gem_obj(fb, 0)) {
 		DRM_ERROR("Can't get CMA GEM object for fb\n");
-		return;
+		return -EINVAL;
 	}
 
-	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
-			 (char *)&fb->pixel_format,
-			 (unsigned long)cma_obj->paddr);
-
-	/* pixel memory location */
-	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
-	top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
-	top_field->gam_gdp_pml += src_x * (bpp >> 3);
-	top_field->gam_gdp_pml += src_y * fb->pitches[0];
-
-	/* input parameters */
-	top_field->gam_gdp_pmp = fb->pitches[0];
-	top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
-				  clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
-
-	/* output parameters */
-	ydo = sti_vtg_get_line_number(*mode, dst_y);
-	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
-	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
-	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
-	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
-	top_field->gam_gdp_vps = (yds << 16) | xds;
-
-	/* Same content and chained together */
-	memcpy(btm_field, top_field, sizeof(*btm_field));
-	top_field->gam_gdp_nvn = list->btm_field_paddr;
-	btm_field->gam_gdp_nvn = list->top_field_paddr;
-
-	/* Interlaced mode */
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
-					 fb->pitches[0];
-
 	if (first_prepare) {
 		/* Register gdp callback */
-		if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
-				compo->vtg_main : compo->vtg_aux,
-				&gdp->vtg_field_nb, crtc)) {
+		gdp->vtg = mixer->id == STI_MIXER_MAIN ?
+					compo->vtg_main : compo->vtg_aux;
+		if (sti_vtg_register_client(gdp->vtg,
+					    &gdp->vtg_field_nb, crtc)) {
 			DRM_ERROR("Cannot register VTG notifier\n");
-			return;
+			return -EINVAL;
 		}
 
 		/* Set and enable gdp clock */
 		if (gdp->clk_pix) {
 			struct clk *clkp;
 			int rate = mode->clock * 1000;
+			int res;
 
-			/* According to the mixer used, the gdp pixel clock
-			 * should have a different parent clock. */
+			/*
+			 * According to the mixer used, the gdp pixel clock
+			 * should have a different parent clock.
+			 */
 			if (mixer->id == STI_MIXER_MAIN)
 				clkp = gdp->clk_main_parent;
 			else
@@ -515,16 +693,114 @@
 			if (res < 0) {
 				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
 					  rate);
-				return;
+				return -EINVAL;
 			}
 
 			if (clk_prepare_enable(gdp->clk_pix)) {
 				DRM_ERROR("Failed to prepare/enable gdp\n");
-				return;
+				return -EINVAL;
 			}
 		}
 	}
 
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+		      sti_plane_to_str(plane),
+		      dst_w, dst_h, dst_x, dst_y,
+		      src_w, src_h, src_x, src_y);
+
+	return 0;
+}
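
As the "16.16 format" comment in the check above indicates, DRM atomic plane state carries source coordinates as fixed point: integer pixels in the high 16 bits, a 1/65536 fraction in the low 16 bits, which is why the driver shifts them right by 16 before clamping. A small standalone sketch of that encoding:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* 100.5 pixels encoded as 16.16 fixed point. */
            uint32_t src_x = (100u << 16) | 0x8000;

            /* The driver keeps only the integer part (src_x >> 16). */
            printf("integer pixels: %u\n", (unsigned)(src_x >> 16));    /* 100 */
            printf("fraction: %u/65536\n", (unsigned)(src_x & 0xffff)); /* 32768 */
            return 0;
    }
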
+
+static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+				  struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_gdp *gdp = to_sti_gdp(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_framebuffer *fb =  state->fb;
+	struct drm_display_mode *mode;
+	int dst_x, dst_y, dst_w, dst_h;
+	int src_x, src_y, src_w, src_h;
+	struct drm_gem_cma_object *cma_obj;
+	struct sti_gdp_node_list *list;
+	struct sti_gdp_node_list *curr_list;
+	struct sti_gdp_node *top_field, *btm_field;
+	u32 dma_updated_top;
+	u32 dma_updated_btm;
+	int format;
+	unsigned int depth, bpp;
+	u32 ydo, xdo, yds, xds;
+
+	if (!crtc || !fb)
+		return;
+
+	mode = &crtc->mode;
+	dst_x = state->crtc_x;
+	dst_y = state->crtc_y;
+	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_x are in 16.16 format */
+	src_x = state->src_x >> 16;
+	src_y = state->src_y >> 16;
+	src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+	src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+
+	list = sti_gdp_get_free_nodes(gdp);
+	top_field = list->top_field;
+	btm_field = list->btm_field;
+
+	dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+		sti_plane_to_str(plane), top_field, btm_field);
+
+	/* build the top field */
+	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+	format = sti_gdp_fourcc2format(fb->pixel_format);
+	top_field->gam_gdp_ctl |= format;
+	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+
+	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+			 (char *)&fb->pixel_format,
+			 (unsigned long)cma_obj->paddr);
+
+	/* pixel memory location */
+	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+	top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+	top_field->gam_gdp_pml += src_x * (bpp >> 3);
+	top_field->gam_gdp_pml += src_y * fb->pitches[0];
+
+	/* output parameters (clamped / cropped) */
+	dst_w = sti_gdp_get_dst(gdp->dev, dst_w, src_w);
+	dst_h = sti_gdp_get_dst(gdp->dev, dst_h, src_h);
+	ydo = sti_vtg_get_line_number(*mode, dst_y);
+	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
+	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+	top_field->gam_gdp_vps = (yds << 16) | xds;
+
+	/* input parameters */
+	src_w = dst_w;
+	top_field->gam_gdp_pmp = fb->pitches[0];
+	top_field->gam_gdp_size = src_h << 16 | src_w;
+
+	/* Same content and chained together */
+	memcpy(btm_field, top_field, sizeof(*btm_field));
+	top_field->gam_gdp_nvn = list->btm_field_paddr;
+	btm_field->gam_gdp_nvn = list->top_field_paddr;
+
+	/* Interlaced mode */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+					 fb->pitches[0];
+
 	/* Update the NVN field of the 'right' field of the current GDP node
 	 * (being used by the HW) with the address of the updated ('free') top
 	 * field GDP node.
@@ -573,6 +849,8 @@
 	}
 
 end:
+	sti_plane_update_fps(plane, true, false);
+
 	plane->status = STI_PLANE_UPDATED;
 }
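
The "Same content and chained together" step above links each field's NVN (next video node) pointer to the other field's physical address, so once started the hardware ping-pongs between the top and bottom nodes by itself; the update path then only patches the NVN of the node currently in use to splice in a freshly built pair. A minimal sketch of that circular chaining, with a hypothetical node layout reduced to the link itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct sti_gdp_node: only the node's own
     * physical address and its NVN link matter for the chaining. */
    struct node {
            uint32_t paddr; /* where the hardware fetches this node */
            uint32_t nvn;   /* physical address of the next node to fetch */
    };

    int main(void)
    {
            struct node top = { .paddr = 0x1000, .nvn = 0 };
            struct node btm = { .paddr = 0x2000, .nvn = 0 };

            /* Each field points at the other, so the engine alternates
             * top/bottom until an NVN is repatched. */
            top.nvn = btm.paddr;
            btm.nvn = top.paddr;

            printf("top @0x%04x -> next 0x%04x\n",
                   (unsigned)top.paddr, (unsigned)top.nvn);
            printf("btm @0x%04x -> next 0x%04x\n",
                   (unsigned)btm.paddr, (unsigned)btm.nvn);
            return 0;
    }
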
 
@@ -580,7 +858,6 @@
 				   struct drm_plane_state *oldstate)
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
-	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
 
 	if (!drm_plane->crtc) {
 		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -589,13 +866,15 @@
 	}
 
 	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->crtc->base.id,
+			 sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
 			 drm_plane->base.id, sti_plane_to_str(plane));
 
 	plane->status = STI_PLANE_DISABLING;
 }
 
 static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+	.atomic_check = sti_gdp_atomic_check,
 	.atomic_update = sti_gdp_atomic_update,
 	.atomic_disable = sti_gdp_atomic_disable,
 };
@@ -639,6 +918,9 @@
 
 	sti_plane_init_property(&gdp->plane, type);
 
+	if (gdp_debugfs_init(gdp, drm_dev->primary))
+		DRM_ERROR("GDP debugfs setup failed\n");
+
 	return &gdp->plane.drm_plane;
 
 err:
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 49cce83..ec0d017 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -326,6 +326,103 @@
 	}
 }
 
+#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+				   readl(hda->regs + reg))
+
+static void hda_dbg_cfg(struct seq_file *s, int val)
+{
+	seq_puts(s, "\tAWG ");
+	seq_puts(s, val & CFG_AWG_ASYNC_EN ? "enabled" : "disabled");
+}
+
+static void hda_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
+{
+	unsigned int i;
+
+	seq_puts(s, "\n\n");
+	seq_puts(s, "  HDA AWG microcode:");
+	for (i = 0; i < AWG_MAX_INST; i++) {
+		if (i % 8 == 0)
+			seq_printf(s, "\n  %04X:", i);
+		seq_printf(s, " %04X", readl(reg + i * 4));
+	}
+}
+
+static void hda_dbg_video_dacs_ctrl(struct seq_file *s, void __iomem *reg)
+{
+	u32 val = readl(reg);
+	u32 mask;
+
+	switch ((u32)reg & VIDEO_DACS_CONTROL_MASK) {
+	case VIDEO_DACS_CONTROL_SYSCFG2535:
+		mask = DAC_CFG_HD_OFF_MASK;
+		break;
+	case VIDEO_DACS_CONTROL_SYSCFG5072:
+		mask = DAC_CFG_HD_HZUVW_OFF_MASK;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("Warning: DACS ctrl register not supported!\n");
+		return;
+	}
+
+	seq_puts(s, "\n");
+	seq_printf(s, "\n  %-25s 0x%08X", "VIDEO_DACS_CONTROL", val);
+	seq_puts(s, "\tHD DACs ");
+	seq_puts(s, val & mask ? "disabled" : "enabled");
+}
+
+static int hda_dbg_show(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
+	DBGFS_DUMP(HDA_ANA_CFG);
+	hda_dbg_cfg(s, readl(hda->regs + HDA_ANA_CFG));
+	DBGFS_DUMP(HDA_ANA_SCALE_CTRL_Y);
+	DBGFS_DUMP(HDA_ANA_SCALE_CTRL_CB);
+	DBGFS_DUMP(HDA_ANA_SCALE_CTRL_CR);
+	DBGFS_DUMP(HDA_ANA_ANC_CTRL);
+	DBGFS_DUMP(HDA_ANA_SRC_Y_CFG);
+	DBGFS_DUMP(HDA_ANA_SRC_C_CFG);
+	hda_dbg_awg_microcode(s, hda->regs + HDA_SYNC_AWGI);
+	if (hda->video_dacs_ctrl)
+		hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
+	seq_puts(s, "\n");
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list hda_debugfs_files[] = {
+	{ "hda", hda_dbg_show, 0, NULL },
+};
+
+static void hda_debugfs_exit(struct sti_hda *hda, struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(hda_debugfs_files,
+				 ARRAY_SIZE(hda_debugfs_files),
+				 minor);
+}
+
+static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
+		hda_debugfs_files[i].data = hda;
+
+	return drm_debugfs_create_files(hda_debugfs_files,
+					ARRAY_SIZE(hda_debugfs_files),
+					minor->debugfs_root, minor);
+}
+
 /**
  * Configure AWG, writing instructions
  *
@@ -685,6 +782,12 @@
 		goto err_sysfs;
 	}
 
+	/* force to disable hd dacs at startup */
+	hda_enable_hd_dacs(hda, false);
+
+	if (hda_debugfs_init(hda, drm_dev->primary))
+		DRM_ERROR("HDA debugfs setup failed\n");
+
 	return 0;
 
 err_sysfs:
@@ -697,7 +800,10 @@
 static void sti_hda_unbind(struct device *dev,
 		struct device *master, void *data)
 {
-	/* do nothing */
+	struct sti_hda *hda = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+
+	hda_debugfs_exit(hda, drm_dev->primary);
 }
 
 static const struct component_ops sti_hda_ops = {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index cd50156..6ef0715 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -6,6 +6,7 @@
 
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/debugfs.h>
 #include <linux/hdmi.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
@@ -51,9 +52,18 @@
 #define HDMI_SW_DI_2_PKT_WORD4          0x0614
 #define HDMI_SW_DI_2_PKT_WORD5          0x0618
 #define HDMI_SW_DI_2_PKT_WORD6          0x061C
+#define HDMI_SW_DI_3_HEAD_WORD          0x0620
+#define HDMI_SW_DI_3_PKT_WORD0          0x0624
+#define HDMI_SW_DI_3_PKT_WORD1          0x0628
+#define HDMI_SW_DI_3_PKT_WORD2          0x062C
+#define HDMI_SW_DI_3_PKT_WORD3          0x0630
+#define HDMI_SW_DI_3_PKT_WORD4          0x0634
+#define HDMI_SW_DI_3_PKT_WORD5          0x0638
+#define HDMI_SW_DI_3_PKT_WORD6          0x063C
 
 #define HDMI_IFRAME_SLOT_AVI            1
 #define HDMI_IFRAME_SLOT_AUDIO          2
+#define HDMI_IFRAME_SLOT_VENDOR         3
 
 #define  XCAT(prefix, x, suffix)        prefix ## x ## suffix
 #define  HDMI_SW_DI_N_HEAD_WORD(x)      XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
@@ -65,6 +75,8 @@
 #define  HDMI_SW_DI_N_PKT_WORD5(x)      XCAT(HDMI_SW_DI_, x, _PKT_WORD5)
 #define  HDMI_SW_DI_N_PKT_WORD6(x)      XCAT(HDMI_SW_DI_, x, _PKT_WORD6)
 
+#define HDMI_SW_DI_MAX_WORD             7
+
 #define HDMI_IFRAME_DISABLED            0x0
 #define HDMI_IFRAME_SINGLE_SHOT         0x1
 #define HDMI_IFRAME_FIELD               0x2
@@ -117,6 +129,8 @@
 	struct drm_connector drm_connector;
 	struct drm_encoder *encoder;
 	struct sti_hdmi *hdmi;
+	struct drm_property *colorspace_property;
+	struct drm_property *hdmi_mode_property;
 };
 
 #define to_sti_hdmi_connector(x) \
@@ -217,8 +231,10 @@
 	/* Clear overrun and underrun fifo */
 	conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR;
 
-	/* Enable HDMI mode not DVI */
-	conf |= HDMI_CFG_HDMI_NOT_DVI | HDMI_CFG_ESS_NOT_OESS;
+	/* Select encryption type and the framing mode */
+	conf |= HDMI_CFG_ESS_NOT_OESS;
+	if (hdmi->hdmi_mode == HDMI_MODE_HDMI)
+		conf |= HDMI_CFG_HDMI_NOT_DVI;
 
 	/* Enable sink term detection */
 	conf |= HDMI_CFG_SINK_TERM_DET_EN;
@@ -241,6 +257,47 @@
 	hdmi_write(hdmi, conf, HDMI_CFG);
 }
 
+/*
+ * Helper to reset info frame
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ * @slot: infoframe to reset
+ */
+static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
+				 u32 slot)
+{
+	u32 val, i;
+	u32 head_offset, pack_offset;
+
+	switch (slot) {
+	case HDMI_IFRAME_SLOT_AVI:
+		head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
+		pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
+		break;
+	case HDMI_IFRAME_SLOT_AUDIO:
+		head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
+		pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
+		break;
+	case HDMI_IFRAME_SLOT_VENDOR:
+		head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR);
+		pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_VENDOR);
+		break;
+	default:
+		DRM_ERROR("unsupported infoframe slot: %#x\n", slot);
+		return;
+	}
+
+	/* Disable transmission for the selected slot */
+	val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+	val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, slot);
+	hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+	/* Reset info frame registers */
+	hdmi_write(hdmi, 0x0, head_offset);
+	for (i = 0; i < HDMI_SW_DI_MAX_WORD; i++)
+		hdmi_write(hdmi, 0x0, pack_offset + i * sizeof(u32));
+}
+
 /**
  * Helper to concatenate infoframe in 32 bits word
  *
@@ -266,12 +323,13 @@
  * @data: infoframe to write
  * @size: size to write
  */
-static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
+static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi,
+					  const u8 *data,
+					  size_t size)
 {
 	const u8 *ptr = data;
 	u32 val, slot, mode, i;
 	u32 head_offset, pack_offset;
-	size_t size;
 
 	switch (*ptr) {
 	case HDMI_INFOFRAME_TYPE_AVI:
@@ -279,17 +337,19 @@
 		mode = HDMI_IFRAME_FIELD;
 		head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
 		pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
-		size = HDMI_AVI_INFOFRAME_SIZE;
 		break;
-
 	case HDMI_INFOFRAME_TYPE_AUDIO:
 		slot = HDMI_IFRAME_SLOT_AUDIO;
 		mode = HDMI_IFRAME_FRAME;
 		head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
 		pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
-		size = HDMI_AUDIO_INFOFRAME_SIZE;
 		break;
-
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		slot = HDMI_IFRAME_SLOT_VENDOR;
+		mode = HDMI_IFRAME_FRAME;
+		head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR);
+		pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_VENDOR);
+		break;
 	default:
 		DRM_ERROR("unsupported infoframe type: %#x\n", *ptr);
 		return;
@@ -308,8 +368,9 @@
 	/*
 	 * Each subpack contains 4 bytes
 	 * The First Bytes of the first subpacket must contain the checksum
-	 * Packet size in increase by one.
+	 * Packet size is increased by one.
 	 */
+	size = size - HDMI_INFOFRAME_HEADER_SIZE + 1;
 	for (i = 0; i < size; i += sizeof(u32)) {
 		size_t num;
 
@@ -321,7 +382,7 @@
 
 	/* Enable transmission slot for updated infoframe */
 	val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
-	val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, slot);
+	val |= HDMI_IFRAME_CFG_DI_N(mode, slot);
 	hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
 }
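
The infopack writer folds the packed infoframe into the 32-bit HDMI_SW_DI_N_PKT_WORDx registers four bytes at a time, and the byte count it sends is the payload plus the single leading checksum byte (hence size - HDMI_INFOFRAME_HEADER_SIZE + 1). A standalone sketch of that folding, assuming the low byte lands in the low bits of each word; the driver's own concatenation helper is the authority on byte order:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold up to 4 bytes into one subpack word, low byte in low bits. */
    static uint32_t pack_word(const uint8_t *data, size_t num)
    {
            uint32_t word = 0;
            size_t i;

            for (i = 0; i < num; i++)
                    word |= (uint32_t)data[i] << (8 * i);
            return word;
    }

    int main(void)
    {
            /* Checksum byte followed by a made-up 5-byte payload. */
            const uint8_t buf[] = { 0x5a, 0x01, 0x02, 0x03, 0x04, 0x05 };
            size_t size = sizeof(buf), i;

            for (i = 0; i < size; i += 4) {
                    size_t num = size - i < 4 ? size - i : 4;

                    printf("PKT_WORD%zu = 0x%08X\n", i / 4,
                           (unsigned)pack_word(buf + i, num));
            }
            return 0;
    }
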
 
@@ -352,7 +413,7 @@
 	}
 
 	/* fixed infoframe configuration not linked to the mode */
-	infoframe.colorspace = HDMI_COLORSPACE_RGB;
+	infoframe.colorspace = hdmi->colorspace;
 	infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
 	infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
 
@@ -362,7 +423,7 @@
 		return ret;
 	}
 
-	hdmi_infoframe_write_infopack(hdmi, buffer);
+	hdmi_infoframe_write_infopack(hdmi, buffer, ret);
 
 	return 0;
 }
@@ -398,7 +459,49 @@
 		return ret;
 	}
 
-	hdmi_infoframe_write_infopack(hdmi, buffer);
+	hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+
+	return 0;
+}
+
+/*
+ * Prepare and configure the VS infoframe
+ *
+ * The Vendor Specific infoframe is transmitted once per frame and
+ * contains vendor-specific information.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return negative value if error occurs
+ */
+#define HDMI_VENDOR_INFOFRAME_MAX_SIZE 6
+static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
+{
+	struct drm_display_mode *mode = &hdmi->mode;
+	struct hdmi_vendor_infoframe infoframe;
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_MAX_SIZE];
+	int ret;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe, mode);
+	if (ret < 0) {
+		/*
+		 * Getting here does not mean the vendor infoframe setup
+		 * failed. It simply means that no vendor infoframe is
+		 * needed for the selected mode: only 4k and stereoscopic
+		 * 3D modes require one. So simply return 0.
+		 */
+		return 0;
+	}
+
+	ret = hdmi_vendor_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+	if (ret < 0) {
+		DRM_ERROR("failed to pack VS infoframe: %d\n", ret);
+		return ret;
+	}
+
+	hdmi_infoframe_write_infopack(hdmi, buffer, ret);
 
 	return 0;
 }
@@ -448,6 +551,172 @@
 	clk_disable_unprepare(hdmi->clk_audio);
 }
 
+#define DBGFS_PRINT_STR(str1, str2) seq_printf(s, "%-24s %s\n", str1, str2)
+#define DBGFS_PRINT_INT(str1, int2) seq_printf(s, "%-24s %d\n", str1, int2)
+#define DBGFS_DUMP(str, reg) seq_printf(s, "%s  %-25s 0x%08X", str, #reg, \
+					hdmi_read(hdmi, reg))
+#define DBGFS_DUMP_DI(reg, slot) DBGFS_DUMP("\n", reg(slot))
+
+static void hdmi_dbg_cfg(struct seq_file *s, int val)
+{
+	int tmp;
+
+	seq_puts(s, "\t");
+	tmp = val & HDMI_CFG_HDMI_NOT_DVI;
+	DBGFS_PRINT_STR("mode:", tmp ? "HDMI" : "DVI");
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = val & HDMI_CFG_HDCP_EN;
+	DBGFS_PRINT_STR("HDCP:", tmp ? "enable" : "disable");
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = val & HDMI_CFG_ESS_NOT_OESS;
+	DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable");
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = val & HDMI_CFG_SINK_TERM_DET_EN;
+	DBGFS_PRINT_STR("Sink term detection:", tmp ? "enable" : "disable");
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = val & HDMI_CFG_H_SYNC_POL_NEG;
+	DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal");
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = val & HDMI_CFG_V_SYNC_POL_NEG;
+	DBGFS_PRINT_STR("Vsync polarity:", tmp ? "inverted" : "normal");
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = val & HDMI_CFG_422_EN;
+	DBGFS_PRINT_STR("YUV422 format:", tmp ? "enable" : "disable");
+}
+
+static void hdmi_dbg_sta(struct seq_file *s, int val)
+{
+	int tmp;
+
+	seq_puts(s, "\t");
+	tmp = (val & HDMI_STA_DLL_LCK);
+	DBGFS_PRINT_STR("pll:", tmp ? "locked" : "not locked");
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = (val & HDMI_STA_HOT_PLUG);
+	DBGFS_PRINT_STR("hdmi cable:", tmp ? "connected" : "not connected");
+}
+
+static void hdmi_dbg_sw_di_cfg(struct seq_file *s, int val)
+{
+	int tmp;
+	char *const en_di[] = {"no transmission",
+			       "single transmission",
+			       "once every field",
+			       "once every frame"};
+
+	seq_puts(s, "\t");
+	tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 1));
+	DBGFS_PRINT_STR("Data island 1:", en_di[tmp]);
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 2)) >> 4;
+	DBGFS_PRINT_STR("Data island 2:", en_di[tmp]);
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 3)) >> 8;
+	DBGFS_PRINT_STR("Data island 3:", en_di[tmp]);
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 4)) >> 12;
+	DBGFS_PRINT_STR("Data island 4:", en_di[tmp]);
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 5)) >> 16;
+	DBGFS_PRINT_STR("Data island 5:", en_di[tmp]);
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 6)) >> 20;
+	DBGFS_PRINT_STR("Data island 6:", en_di[tmp]);
+}
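
The shifts above imply that HDMI_SW_DI_CFG carries one 4-bit transmission-mode field per data island slot, slot N sitting at bits 4*(N-1), with the low two bits selecting one of the four modes. A sketch of decoding a register value under that assumption; the cfg value below is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Transmission modes as listed in hdmi_dbg_sw_di_cfg(). */
    static const char *const di_mode[] = {
            "no transmission", "single transmission",
            "once every field", "once every frame",
    };

    int main(void)
    {
            /* Example: slot 1 = field, slots 2 and 3 = frame, rest off. */
            uint32_t cfg = (2u << 0) | (3u << 4) | (3u << 8);
            int slot;

            for (slot = 1; slot <= 6; slot++) {
                    uint32_t mode = (cfg >> (4 * (slot - 1))) & 0x3;

                    printf("Data island %d: %s\n", slot, di_mode[mode]);
            }
            return 0;
    }
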
+
+static int hdmi_dbg_show(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
+	DBGFS_DUMP("\n", HDMI_CFG);
+	hdmi_dbg_cfg(s, hdmi_read(hdmi, HDMI_CFG));
+	DBGFS_DUMP("", HDMI_INT_EN);
+	DBGFS_DUMP("\n", HDMI_STA);
+	hdmi_dbg_sta(s, hdmi_read(hdmi, HDMI_STA));
+	DBGFS_DUMP("", HDMI_ACTIVE_VID_XMIN);
+	seq_puts(s, "\t");
+	DBGFS_PRINT_INT("Xmin:", hdmi_read(hdmi, HDMI_ACTIVE_VID_XMIN));
+	DBGFS_DUMP("", HDMI_ACTIVE_VID_XMAX);
+	seq_puts(s, "\t");
+	DBGFS_PRINT_INT("Xmax:", hdmi_read(hdmi, HDMI_ACTIVE_VID_XMAX));
+	DBGFS_DUMP("", HDMI_ACTIVE_VID_YMIN);
+	seq_puts(s, "\t");
+	DBGFS_PRINT_INT("Ymin:", hdmi_read(hdmi, HDMI_ACTIVE_VID_YMIN));
+	DBGFS_DUMP("", HDMI_ACTIVE_VID_YMAX);
+	seq_puts(s, "\t");
+	DBGFS_PRINT_INT("Ymax:", hdmi_read(hdmi, HDMI_ACTIVE_VID_YMAX));
+	DBGFS_DUMP("", HDMI_SW_DI_CFG);
+	hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
+
+	seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
+		   HDMI_IFRAME_SLOT_AVI);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_AVI);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_AVI);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_AVI);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_AVI);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_AVI);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_AVI);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_AVI);
+	seq_puts(s, "\n");
+	seq_printf(s, "\n AUDIO Infoframe (Data Island slot N=%d):",
+		   HDMI_IFRAME_SLOT_AUDIO);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AUDIO);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_AUDIO);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_AUDIO);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_AUDIO);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_AUDIO);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_AUDIO);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_AUDIO);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_AUDIO);
+	seq_puts(s, "\n");
+	seq_printf(s, "\n VENDOR SPECIFIC Infoframe (Data Island slot N=%d):",
+		   HDMI_IFRAME_SLOT_VENDOR);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_VENDOR);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_VENDOR);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_VENDOR);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_VENDOR);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_VENDOR);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_VENDOR);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_VENDOR);
+	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
+	seq_puts(s, "\n");
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list hdmi_debugfs_files[] = {
+	{ "hdmi", hdmi_dbg_show, 0, NULL },
+};
+
+static void hdmi_debugfs_exit(struct sti_hdmi *hdmi, struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(hdmi_debugfs_files,
+				 ARRAY_SIZE(hdmi_debugfs_files),
+				 minor);
+}
+
+static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
+		hdmi_debugfs_files[i].data = hdmi;
+
+	return drm_debugfs_create_files(hdmi_debugfs_files,
+					ARRAY_SIZE(hdmi_debugfs_files),
+					minor->debugfs_root, minor);
+}
+
 static void sti_hdmi_disable(struct drm_bridge *bridge)
 {
 	struct sti_hdmi *hdmi = bridge->driver_private;
@@ -468,6 +737,11 @@
 	/* Stop the phy */
 	hdmi->phy_ops->stop(hdmi);
 
+	/* Reset info frame transmission */
+	hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_AVI);
+	hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_AUDIO);
+	hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_VENDOR);
+
 	/* Set the default channel data to be a dark red */
 	hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT);
 	hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT);
@@ -523,6 +797,10 @@
 	if (hdmi_audio_infoframe_config(hdmi))
 		DRM_ERROR("Unable to configure AUDIO infoframe\n");
 
+	/* Program VS infoframe */
+	if (hdmi_vendor_infoframe_config(hdmi))
+		DRM_ERROR("Unable to configure VS infoframe\n");
+
 	/* Sw reset */
 	hdmi_swreset(hdmi);
 }
@@ -664,12 +942,97 @@
 	kfree(hdmi_connector);
 }
 
+static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
+					     struct drm_connector *connector)
+{
+	struct sti_hdmi_connector *hdmi_connector
+		= to_sti_hdmi_connector(connector);
+	struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+	struct drm_property *prop;
+
+	/* colorspace property */
+	hdmi->colorspace = DEFAULT_COLORSPACE_MODE;
+	prop = drm_property_create_enum(drm_dev, 0, "colorspace",
+					colorspace_mode_names,
+					ARRAY_SIZE(colorspace_mode_names));
+	if (!prop) {
+		DRM_ERROR("failed to create colorspace property\n");
+		return;
+	}
+	hdmi_connector->colorspace_property = prop;
+	drm_object_attach_property(&connector->base, prop, hdmi->colorspace);
+
+	/* hdmi_mode property */
+	hdmi->hdmi_mode = DEFAULT_HDMI_MODE;
+	prop = drm_property_create_enum(drm_dev, 0, "hdmi_mode",
+					hdmi_mode_names,
+					ARRAY_SIZE(hdmi_mode_names));
+	if (!prop) {
+		DRM_ERROR("failed to create hdmi_mode property\n");
+		return;
+	}
+	hdmi_connector->hdmi_mode_property = prop;
+	drm_object_attach_property(&connector->base, prop, hdmi->hdmi_mode);
+
+}
+
+static int
+sti_hdmi_connector_set_property(struct drm_connector *connector,
+				struct drm_connector_state *state,
+				struct drm_property *property,
+				uint64_t val)
+{
+	struct sti_hdmi_connector *hdmi_connector
+		= to_sti_hdmi_connector(connector);
+	struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+	if (property == hdmi_connector->colorspace_property) {
+		hdmi->colorspace = val;
+		return 0;
+	}
+
+	if (property == hdmi_connector->hdmi_mode_property) {
+		hdmi->hdmi_mode = val;
+		return 0;
+	}
+
+	DRM_ERROR("failed to set hdmi connector property\n");
+	return -EINVAL;
+}
+
+static int
+sti_hdmi_connector_get_property(struct drm_connector *connector,
+				const struct drm_connector_state *state,
+				struct drm_property *property,
+				uint64_t *val)
+{
+	struct sti_hdmi_connector *hdmi_connector
+		= to_sti_hdmi_connector(connector);
+	struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+	if (property == hdmi_connector->colorspace_property) {
+		*val = hdmi->colorspace;
+		return 0;
+	}
+
+	if (property == hdmi_connector->hdmi_mode_property) {
+		*val = hdmi->hdmi_mode;
+		return 0;
+	}
+
+	DRM_ERROR("failed to get hdmi connector property\n");
+	return -EINVAL;
+}
+
 static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = sti_hdmi_connector_detect,
 	.destroy = sti_hdmi_connector_destroy,
 	.reset = drm_atomic_helper_connector_reset,
+	.set_property = drm_atomic_helper_connector_set_property,
+	.atomic_set_property = sti_hdmi_connector_set_property,
+	.atomic_get_property = sti_hdmi_connector_get_property,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
@@ -729,6 +1092,9 @@
 	drm_connector_helper_add(drm_connector,
 			&sti_hdmi_connector_helper_funcs);
 
+	/* initialise property */
+	sti_hdmi_connector_init_property(drm_dev, drm_connector);
+
 	err = drm_connector_register(drm_connector);
 	if (err)
 		goto err_connector;
@@ -742,6 +1108,9 @@
 	/* Enable default interrupts */
 	hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
 
+	if (hdmi_debugfs_init(hdmi, drm_dev->primary))
+		DRM_ERROR("HDMI debugfs setup failed\n");
+
 	return 0;
 
 err_sysfs:
@@ -755,7 +1124,10 @@
 static void sti_hdmi_unbind(struct device *dev,
 		struct device *master, void *data)
 {
-	/* do nothing */
+	struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+
+	hdmi_debugfs_exit(hdmi, drm_dev->primary);
 }
 
 static const struct component_ops sti_hdmi_ops = {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 3d22390..ef3a945 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -7,15 +7,14 @@
 #ifndef _STI_HDMI_H_
 #define _STI_HDMI_H_
 
+#include <linux/hdmi.h>
 #include <linux/platform_device.h>
 
 #include <drm/drmP.h>
 
 #define HDMI_STA           0x0010
 #define HDMI_STA_DLL_LCK   BIT(5)
-
-#define HDMI_STA_HOT_PLUG_SHIFT 4
-#define HDMI_STA_HOT_PLUG	(1 << HDMI_STA_HOT_PLUG_SHIFT)
+#define HDMI_STA_HOT_PLUG  BIT(4)
 
 struct sti_hdmi;
 
@@ -24,6 +23,27 @@
 	void (*stop)(struct sti_hdmi *hdmi);
 };
 
+/* values for the framing mode property */
+enum sti_hdmi_modes {
+	HDMI_MODE_HDMI,
+	HDMI_MODE_DVI,
+};
+
+static const struct drm_prop_enum_list hdmi_mode_names[] = {
+	{ HDMI_MODE_HDMI, "hdmi" },
+	{ HDMI_MODE_DVI, "dvi" },
+};
+
+#define DEFAULT_HDMI_MODE HDMI_MODE_HDMI
+
+static const struct drm_prop_enum_list colorspace_mode_names[] = {
+	{ HDMI_COLORSPACE_RGB, "rgb" },
+	{ HDMI_COLORSPACE_YUV422, "yuv422" },
+	{ HDMI_COLORSPACE_YUV444, "yuv444" },
+};
+
+#define DEFAULT_COLORSPACE_MODE HDMI_COLORSPACE_RGB
+
 /**
  * STI hdmi structure
  *
@@ -44,6 +64,9 @@
  * @wait_event: wait event
  * @event_received: wait event status
  * @reset: reset control of the hdmi phy
+ * @ddc_adapt: i2c ddc adapter
+ * @colorspace: current colorspace selected
+ * @hdmi_mode: select framing for HDMI or DVI
  */
 struct sti_hdmi {
 	struct device dev;
@@ -64,6 +87,8 @@
 	bool event_received;
 	struct reset_control *reset;
 	struct i2c_adapter *ddc_adapt;
+	enum hdmi_colorspace colorspace;
+	enum sti_hdmi_modes hdmi_mode;
 };
 
 u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1d3c3d0..e05b0dc 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -4,14 +4,11 @@
  * License terms:  GNU General Public License (GPL), version 2
  */
 
-#include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/firmware.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
 #include <linux/reset.h>
 
-#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 
@@ -329,8 +326,6 @@
  * @reset:             reset control
  * @vtg_nb:            notifier to handle VTG Vsync
  * @btm_field_pending: is there any bottom field (interlaced frame) to display
- * @curr_field_count:  number of field updates
- * @last_field_count:  number of field updates since last fps measure
  * @hqvdp_cmd:         buffer of commands
  * @hqvdp_cmd_paddr:   physical address of hqvdp_cmd
  * @vtg:               vtg for main data path
@@ -346,10 +341,8 @@
 	struct reset_control *reset;
 	struct notifier_block vtg_nb;
 	bool btm_field_pending;
-	unsigned int curr_field_count;
-	unsigned int last_field_count;
 	void *hqvdp_cmd;
-	dma_addr_t hqvdp_cmd_paddr;
+	u32 hqvdp_cmd_paddr;
 	struct sti_vtg *vtg;
 	bool xp70_initialized;
 };
@@ -372,8 +365,8 @@
  */
 static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
 {
-	int curr_cmd, next_cmd;
-	dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+	u32 curr_cmd, next_cmd;
+	u32 cmd = hqvdp->hqvdp_cmd_paddr;
 	int i;
 
 	curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
@@ -400,8 +393,8 @@
  */
 static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
 {
-	int curr_cmd;
-	dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+	u32 curr_cmd;
+	u32 cmd = hqvdp->hqvdp_cmd_paddr;
 	unsigned int i;
 
 	curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
@@ -417,6 +410,246 @@
 }
 
 /**
+ * sti_hqvdp_get_next_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for the next hqvdp_cmd that will be used by the FW.
+ *
+ * RETURNS:
+ * the offset of the next command that will be used,
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_next_cmd(struct sti_hqvdp *hqvdp)
+{
+	u32 next_cmd;
+	u32 cmd = hqvdp->hqvdp_cmd_paddr;
+	unsigned int i;
+
+	next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+	for (i = 0; i < NB_VDP_CMD; i++) {
+		if (cmd == next_cmd)
+			return i * sizeof(struct sti_hqvdp_cmd);
+
+		cmd += sizeof(struct sti_hqvdp_cmd);
+	}
+
+	return -1;
+}
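
All three command lookups (free, current, next) follow the same pattern: read a physical address back from a mailbox register and scan the NB_VDP_CMD contiguous slots for a match, returning the byte offset into the CPU-side command buffer. A standalone sketch of that translation; the slot count, command size, and addresses below are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define NB_CMD   4     /* hypothetical slot count */
    #define CMD_SIZE 0x100 /* hypothetical command size in bytes */

    /* Translate a physical address reported by the firmware into a byte
     * offset inside the CPU-visible command buffer, or -1 if it does not
     * match any slot -- the same scan the three helpers perform. */
    static int cmd_offset(uint32_t base_paddr, uint32_t reported)
    {
            uint32_t cmd = base_paddr;
            int i;

            for (i = 0; i < NB_CMD; i++) {
                    if (cmd == reported)
                            return i * CMD_SIZE;
                    cmd += CMD_SIZE;
            }
            return -1;
    }

    int main(void)
    {
            printf("%d\n", cmd_offset(0x40000000, 0x40000200)); /* 512: slot 2 */
            printf("%d\n", cmd_offset(0x40000000, 0x50000000)); /* -1: unknown */
            return 0;
    }
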
+
+#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+				   readl(hqvdp->regs + reg))
+
+static const char *hqvdp_dbg_get_lut(u32 *coef)
+{
+	if (!memcmp(coef, coef_lut_a_legacy, 16))
+		return "LUT A";
+	if (!memcmp(coef, coef_lut_b, 16))
+		return "LUT B";
+	if (!memcmp(coef, coef_lut_c_y_legacy, 16))
+		return "LUT C Y";
+	if (!memcmp(coef, coef_lut_c_c_legacy, 16))
+		return "LUT C C";
+	if (!memcmp(coef, coef_lut_d_y_legacy, 16))
+		return "LUT D Y";
+	if (!memcmp(coef, coef_lut_d_c_legacy, 16))
+		return "LUT D C";
+	if (!memcmp(coef, coef_lut_e_y_legacy, 16))
+		return "LUT E Y";
+	if (!memcmp(coef, coef_lut_e_c_legacy, 16))
+		return "LUT E C";
+	if (!memcmp(coef, coef_lut_f_y_legacy, 16))
+		return "LUT F Y";
+	if (!memcmp(coef, coef_lut_f_c_legacy, 16))
+		return "LUT F C";
+	return "<UNKNOWN>";
+}
+
+static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c)
+{
+	int src_w, src_h, dst_w, dst_h;
+
+	seq_puts(s, "\n\tTOP:");
+	seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config);
+	switch (c->top.config) {
+	case TOP_CONFIG_PROGRESSIVE:
+		seq_puts(s, "\tProgressive");
+		break;
+	case TOP_CONFIG_INTER_TOP:
+		seq_puts(s, "\tInterlaced, top field");
+		break;
+	case TOP_CONFIG_INTER_BTM:
+		seq_puts(s, "\tInterlaced, bottom field");
+		break;
+	default:
+		seq_puts(s, "\t<UNKNOWN>");
+		break;
+	}
+
+	seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format);
+	seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma);
+	seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma);
+	seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch);
+	seq_printf(s, "\n\t %-20s 0x%08X", "CSrcPitch",
+		   c->top.chroma_src_pitch);
+	seq_printf(s, "\n\t %-20s 0x%08X", "InputFrameSize",
+		   c->top.input_frame_size);
+	seq_printf(s, "\t%dx%d",
+		   c->top.input_frame_size & 0x0000FFFF,
+		   c->top.input_frame_size >> 16);
+	seq_printf(s, "\n\t %-20s 0x%08X", "InputViewportSize",
+		   c->top.input_viewport_size);
+	src_w = c->top.input_viewport_size & 0x0000FFFF;
+	src_h = c->top.input_viewport_size >> 16;
+	seq_printf(s, "\t%dx%d", src_w, src_h);
+
+	seq_puts(s, "\n\tHVSRC:");
+	seq_printf(s, "\n\t %-20s 0x%08X", "OutputPictureSize",
+		   c->hvsrc.output_picture_size);
+	dst_w = c->hvsrc.output_picture_size & 0x0000FFFF;
+	dst_h = c->hvsrc.output_picture_size >> 16;
+	seq_printf(s, "\t%dx%d", dst_w, dst_h);
+	seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl);
+
+	seq_printf(s, "\n\t %-20s %s", "yh_coef",
+		   hqvdp_dbg_get_lut(c->hvsrc.yh_coef));
+	seq_printf(s, "\n\t %-20s %s", "ch_coef",
+		   hqvdp_dbg_get_lut(c->hvsrc.ch_coef));
+	seq_printf(s, "\n\t %-20s %s", "yv_coef",
+		   hqvdp_dbg_get_lut(c->hvsrc.yv_coef));
+	seq_printf(s, "\n\t %-20s %s", "cv_coef",
+		   hqvdp_dbg_get_lut(c->hvsrc.cv_coef));
+
+	seq_printf(s, "\n\t %-20s", "ScaleH");
+	if (dst_w > src_w)
+		seq_printf(s, " %d/1", dst_w / src_w);
+	else
+		seq_printf(s, " 1/%d", src_w / dst_w);
+
+	seq_printf(s, "\n\t %-20s", "ScaleV");
+	if (dst_h > src_h)
+		seq_printf(s, " %d/1", dst_h / src_h);
+	else
+		seq_printf(s, " 1/%d", src_h / dst_h);
+
+	seq_puts(s, "\n\tCSDI:");
+	seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config);
+	switch (c->csdi.config) {
+	case CSDI_CONFIG_PROG:
+		seq_puts(s, "Bypass");
+		break;
+	case CSDI_CONFIG_INTER_DIR:
+		seq_puts(s, "Deinterlace, directional");
+		break;
+	default:
+		seq_puts(s, "<UNKNOWN>");
+		break;
+	}
+
+	seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2);
+	seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config);
+}
+
+static int hqvdp_dbg_show(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	int cmd, cmd_offset, infoxp70;
+	void *virt;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "%s: (vaddr = 0x%p)",
+		   sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
+
+	DBGFS_DUMP(HQVDP_MBX_IRQ_TO_XP70);
+	DBGFS_DUMP(HQVDP_MBX_INFO_HOST);
+	DBGFS_DUMP(HQVDP_MBX_IRQ_TO_HOST);
+	DBGFS_DUMP(HQVDP_MBX_INFO_XP70);
+	infoxp70 = readl(hqvdp->regs + HQVDP_MBX_INFO_XP70);
+	seq_puts(s, "\tFirmware state: ");
+	if (infoxp70 & INFO_XP70_FW_READY)
+		seq_puts(s, "idle and ready");
+	else if (infoxp70 & INFO_XP70_FW_PROCESSING)
+		seq_puts(s, "processing a picture");
+	else if (infoxp70 & INFO_XP70_FW_INITQUEUES)
+		seq_puts(s, "programming queues");
+	else
+		seq_puts(s, "NOT READY");
+
+	DBGFS_DUMP(HQVDP_MBX_SW_RESET_CTRL);
+	DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL1);
+	if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
+					& STARTUP_CTRL1_RST_DONE)
+		seq_puts(s, "\tReset is done");
+	else
+		seq_puts(s, "\tReset is NOT done");
+	DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL2);
+	if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2)
+					& STARTUP_CTRL2_FETCH_EN)
+		seq_puts(s, "\tFetch is enabled");
+	else
+		seq_puts(s, "\tFetch is NOT enabled");
+	DBGFS_DUMP(HQVDP_MBX_GP_STATUS);
+	DBGFS_DUMP(HQVDP_MBX_NEXT_CMD);
+	DBGFS_DUMP(HQVDP_MBX_CURRENT_CMD);
+	DBGFS_DUMP(HQVDP_MBX_SOFT_VSYNC);
+	if (!(readl(hqvdp->regs + HQVDP_MBX_SOFT_VSYNC) & 3))
+		seq_puts(s, "\tHW Vsync");
+	else
+		seq_puts(s, "\tSW Vsync ?!?!");
+
+	/* Last command */
+	cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+	cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
+	if (cmd_offset == -1) {
+		seq_puts(s, "\n\n  Last command: unknown");
+	} else {
+		virt = hqvdp->hqvdp_cmd + cmd_offset;
+		seq_printf(s, "\n\n  Last command: address @ 0x%x (0x%p)",
+			   cmd, virt);
+		hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
+	}
+
+	/* Next command */
+	cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+	cmd_offset = sti_hqvdp_get_next_cmd(hqvdp);
+	if (cmd_offset == -1) {
+		seq_puts(s, "\n\n  Next command: unknown");
+	} else {
+		virt = hqvdp->hqvdp_cmd + cmd_offset;
+		seq_printf(s, "\n\n  Next command: address @ 0x%x (0x%p)",
+			   cmd, virt);
+		hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
+	}
+
+	seq_puts(s, "\n");
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list hqvdp_debugfs_files[] = {
+	{ "hqvdp", hqvdp_dbg_show, 0, NULL },
+};
+
+static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
+		hqvdp_debugfs_files[i].data = hqvdp;
+
+	return drm_debugfs_create_files(hqvdp_debugfs_files,
+					ARRAY_SIZE(hqvdp_debugfs_files),
+					minor->debugfs_root, minor);
+}
+
+/**
  * sti_hqvdp_update_hvsrc
  * @orient: horizontal or vertical
  * @scale:  scaling/zoom factor
@@ -580,7 +813,7 @@
 		btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
 		top_cmd_offest = sti_hqvdp_get_curr_cmd(hqvdp);
 		if ((btm_cmd_offset == -1) || (top_cmd_offest == -1)) {
-			DRM_ERROR("Cannot get cmds, skip btm field\n");
+			DRM_DEBUG_DRIVER("Warning: no cmd, will skip field\n");
 			return -EBUSY;
 		}
 
@@ -599,11 +832,12 @@
 		writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
 				hqvdp->regs + HQVDP_MBX_NEXT_CMD);
 
-		hqvdp->curr_field_count++;
 		hqvdp->btm_field_pending = false;
 
 		dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
 				__func__, hqvdp->hqvdp_cmd_paddr);
+
+		sti_plane_update_fps(&hqvdp->plane, false, true);
 	}
 
 	return 0;
@@ -612,19 +846,21 @@
 static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
 {
 	int size;
+	dma_addr_t dma_addr;
 
 	hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
 
 	/* Allocate memory for the VDP commands */
 	size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
 	hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
-					&hqvdp->hqvdp_cmd_paddr,
+					&dma_addr,
 					GFP_KERNEL | GFP_DMA);
 	if (!hqvdp->hqvdp_cmd) {
 		DRM_ERROR("Failed to allocate memory for VDP cmd\n");
 		return;
 	}
 
+	hqvdp->hqvdp_cmd_paddr = (u32)dma_addr;
 	memset(hqvdp->hqvdp_cmd, 0, size);
 }
 
@@ -670,7 +906,7 @@
 	DRM_DEBUG_DRIVER("\n");
 
 	if (hqvdp->xp70_initialized) {
-		DRM_INFO("HQVDP XP70 already initialized\n");
+		DRM_DEBUG_DRIVER("HQVDP XP70 already initialized\n");
 		return;
 	}
 
@@ -775,6 +1011,94 @@
 	release_firmware(firmware);
 }
 
+static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
+				  struct drm_plane_state *state)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_framebuffer *fb = state->fb;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+	struct drm_crtc_state *crtc_state;
+	struct drm_display_mode *mode;
+	int dst_x, dst_y, dst_w, dst_h;
+	int src_x, src_y, src_w, src_h;
+
+	/* no need for further checks if the plane is being disabled */
+	if (!crtc || !fb)
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+	mode = &crtc_state->mode;
+	dst_x = state->crtc_x;
+	dst_y = state->crtc_y;
+	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_x are in 16.16 format */
+	src_x = state->src_x >> 16;
+	src_y = state->src_y >> 16;
+	src_w = state->src_w >> 16;
+	src_h = state->src_h >> 16;
+
+	if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+					src_w, src_h,
+					dst_w, dst_h)) {
+		DRM_ERROR("Scaling beyond HW capabilities\n");
+		return -EINVAL;
+	}
+
+	if (!drm_fb_cma_get_gem_obj(fb, 0)) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Input / output size
+	 * Align to upper even value
+	 */
+	dst_w = ALIGN(dst_w, 2);
+	dst_h = ALIGN(dst_h, 2);
+
+	if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+	    (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+	    (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+	    (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+			  src_w, src_h,
+			  dst_w, dst_h);
+		return -EINVAL;
+	}
+
+	if (first_prepare) {
+		/* Start HQVDP XP70 coprocessor */
+		sti_hqvdp_start_xp70(hqvdp);
+
+		/* Prevent VTG shutdown */
+		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+			DRM_ERROR("Failed to prepare/enable pix main clk\n");
+			return -EINVAL;
+		}
+
+		/* Register VTG Vsync callback to handle bottom fields */
+		if (sti_vtg_register_client(hqvdp->vtg,
+					    &hqvdp->vtg_nb,
+					    crtc)) {
+			DRM_ERROR("Cannot register VTG notifier\n");
+			return -EINVAL;
+		}
+	}
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+		      sti_plane_to_str(plane),
+		      dst_w, dst_h, dst_x, dst_y,
+		      src_w, src_h, src_x, src_y);
+
+	return 0;
+}
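
The "align to upper even value" step relies on ALIGN() rounding up to the next multiple of a power of two, so an odd output size grows by one instead of truncating. A one-file sketch of the same arithmetic, assuming the usual kernel definition ALIGN(x, a) = ((x) + (a) - 1) & ~((a) - 1):

    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            printf("%d\n", ALIGN_UP(1079, 2)); /* 1080: odd size rounded up */
            printf("%d\n", ALIGN_UP(1080, 2)); /* 1080: even size unchanged */
            return 0;
    }
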
+
 static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
 				    struct drm_plane_state *oldstate)
 {
@@ -782,46 +1106,36 @@
 	struct sti_plane *plane = to_sti_plane(drm_plane);
 	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
 	struct drm_crtc *crtc = state->crtc;
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
 	struct drm_framebuffer *fb = state->fb;
-	struct drm_display_mode *mode = &crtc->mode;
-	int dst_x = state->crtc_x;
-	int dst_y = state->crtc_y;
-	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
-	/* src_x are in 16.16 format */
-	int src_x = state->src_x >> 16;
-	int src_y = state->src_y >> 16;
-	int src_w = state->src_w >> 16;
-	int src_h = state->src_h >> 16;
-	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+	struct drm_display_mode *mode;
+	int dst_x, dst_y, dst_w, dst_h;
+	int src_x, src_y, src_w, src_h;
 	struct drm_gem_cma_object *cma_obj;
 	struct sti_hqvdp_cmd *cmd;
 	int scale_h, scale_v;
 	int cmd_offset;
 
-	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-		      crtc->base.id, sti_mixer_to_str(mixer),
-		      drm_plane->base.id, sti_plane_to_str(plane));
-	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
-		      sti_plane_to_str(plane),
-		      dst_w, dst_h, dst_x, dst_y,
-		      src_w, src_h, src_x, src_y);
+	if (!crtc || !fb)
+		return;
+
+	mode = &crtc->mode;
+	dst_x = state->crtc_x;
+	dst_y = state->crtc_y;
+	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_x are in 16.16 format */
+	src_x = state->src_x >> 16;
+	src_y = state->src_y >> 16;
+	src_w = state->src_w >> 16;
+	src_h = state->src_h >> 16;
 
 	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
 	if (cmd_offset == -1) {
-		DRM_ERROR("No available hqvdp_cmd now\n");
+		DRM_DEBUG_DRIVER("Warning: no cmd, will skip frame\n");
 		return;
 	}
 	cmd = hqvdp->hqvdp_cmd + cmd_offset;
 
-	if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
-					src_w, src_h,
-					dst_w, dst_h)) {
-		DRM_ERROR("Scaling beyond HW capabilities\n");
-		return;
-	}
-
 	/* Static parameters, defaulting to progressive mode */
 	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
 	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
@@ -836,10 +1150,6 @@
 	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
 
 	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	if (!cma_obj) {
-		DRM_ERROR("Can't get CMA GEM object for fb\n");
-		return;
-	}
 
 	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
 			 (char *)&fb->pixel_format,
@@ -860,16 +1170,6 @@
 	dst_w = ALIGN(dst_w, 2);
 	dst_h = ALIGN(dst_h, 2);
 
-	if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
-	    (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
-	    (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
-	    (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
-		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
-			  src_w, src_h,
-			  dst_w, dst_h);
-		return;
-	}
-
 	cmd->top.input_viewport_size = src_h << 16 | src_w;
 	cmd->top.input_frame_size = src_h << 16 | src_w;
 	cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
@@ -900,30 +1200,9 @@
 	scale_v = SCALE_FACTOR * dst_h / src_h;
 	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
 
-	if (first_prepare) {
-		/* Start HQVDP XP70 coprocessor */
-		sti_hqvdp_start_xp70(hqvdp);
-
-		/* Prevent VTG shutdown */
-		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
-			DRM_ERROR("Failed to prepare/enable pix main clk\n");
-			return;
-		}
-
-		/* Register VTG Vsync callback to handle bottom fields */
-		if (sti_vtg_register_client(hqvdp->vtg,
-					    &hqvdp->vtg_nb,
-					    crtc)) {
-			DRM_ERROR("Cannot register VTG notifier\n");
-			return;
-		}
-	}
-
 	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
 	       hqvdp->regs + HQVDP_MBX_NEXT_CMD);
 
-	hqvdp->curr_field_count++;
-
 	/* Interlaced : get ready to display the bottom field at next Vsync */
 	if (fb->flags & DRM_MODE_FB_INTERLACED)
 		hqvdp->btm_field_pending = true;
@@ -931,6 +1210,8 @@
 	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
 		__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
 
+	sti_plane_update_fps(plane, true, true);
+
 	plane->status = STI_PLANE_UPDATED;
 }
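
The HVSRC zoom factors handed to the firmware are integer ratios scaled by SCALE_FACTOR, so e.g. a 720-to-1080 vertical upscale becomes a single integer. In this sketch SCALE_FACTOR is assumed to be 8192 purely for illustration; the real value is defined elsewhere in sti_hqvdp.c:

    #include <stdio.h>

    #define SCALE_FACTOR 8192 /* assumed value, for illustration only */

    int main(void)
    {
            int src_h = 720, dst_h = 1080;

            /* Same integer ratio sti_hqvdp_atomic_update() computes. */
            int scale_v = SCALE_FACTOR * dst_h / src_h;

            printf("scale_v = %d (%d.%02dx)\n", scale_v,
                   scale_v / SCALE_FACTOR,
                   (scale_v % SCALE_FACTOR) * 100 / SCALE_FACTOR);
            return 0;
    }
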
 
@@ -938,7 +1219,6 @@
 				     struct drm_plane_state *oldstate)
 {
 	struct sti_plane *plane = to_sti_plane(drm_plane);
-	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
 
 	if (!drm_plane->crtc) {
 		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -947,13 +1227,15 @@
 	}
 
 	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->crtc->base.id,
+			 sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
 			 drm_plane->base.id, sti_plane_to_str(plane));
 
 	plane->status = STI_PLANE_DISABLING;
 }
 
 static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+	.atomic_check = sti_hqvdp_atomic_check,
 	.atomic_update = sti_hqvdp_atomic_update,
 	.atomic_disable = sti_hqvdp_atomic_disable,
 };
@@ -983,6 +1265,9 @@
 
 	sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
 
+	if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
+		DRM_ERROR("HQVDP debugfs setup failed\n");
+
 	return &hqvdp->plane.drm_plane;
 }
 
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 49db835..e7425c3 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -75,6 +75,145 @@
 	writel(val, mixer->regs + reg_id);
 }
 
+#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+				   sti_mixer_reg_read(mixer, reg))
+
+static void mixer_dbg_ctl(struct seq_file *s, int val)
+{
+	unsigned int i;
+	int count = 0;
+	char *const disp_layer[] = {"BKG", "VID0", "VID1", "GDP0",
+				    "GDP1", "GDP2", "GDP3"};
+
+	seq_puts(s, "\tEnabled: ");
+	for (i = 0; i < 7; i++) {
+		if (val & 1) {
+			seq_printf(s, "%s ", disp_layer[i]);
+			count++;
+		}
+		val = val >> 1;
+	}
+
+	val = val >> 2;
+	if (val & 1) {
+		seq_puts(s, "CURS ");
+		count++;
+	}
+	if (!count)
+		seq_puts(s, "Nothing");
+}
+
+static void mixer_dbg_crb(struct seq_file *s, int val)
+{
+	int i;
+
+	seq_puts(s, "\tDepth: ");
+	for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
+		switch (val & GAM_DEPTH_MASK_ID) {
+		case GAM_DEPTH_VID0_ID:
+			seq_puts(s, "VID0");
+			break;
+		case GAM_DEPTH_VID1_ID:
+			seq_puts(s, "VID1");
+			break;
+		case GAM_DEPTH_GDP0_ID:
+			seq_puts(s, "GDP0");
+			break;
+		case GAM_DEPTH_GDP1_ID:
+			seq_puts(s, "GDP1");
+			break;
+		case GAM_DEPTH_GDP2_ID:
+			seq_puts(s, "GDP2");
+			break;
+		case GAM_DEPTH_GDP3_ID:
+			seq_puts(s, "GDP3");
+			break;
+		default:
+			seq_puts(s, "---");
+		}
+
+		if (i < GAM_MIXER_NB_DEPTH_LEVEL - 1)
+			seq_puts(s, " < ");
+		val = val >> 3;
+	}
+}
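
GAM_MIXER_CRB packs the plane stacking order as one 3-bit plane ID per depth level, lowest level first, which is why mixer_dbg_crb() shifts the value right by three on each iteration. A sketch of the same decode; the numeric IDs below are hypothetical stand-ins for the GAM_DEPTH_*_ID constants in sti_mixer.h:

    #include <stdint.h>
    #include <stdio.h>

    #define NB_DEPTH   6   /* matches GAM_MIXER_NB_DEPTH_LEVEL */
    #define DEPTH_MASK 0x7 /* one 3-bit plane ID per level */

    /* Hypothetical ID-to-name table (eight entries so any 3-bit value
     * indexes safely); the real IDs live in sti_mixer.h. */
    static const char *const plane_id[8] = {
            "---", "VID0", "VID1", "GDP0", "GDP1", "GDP2", "GDP3", "---",
    };

    int main(void)
    {
            /* Example CRB: GDP0 at the lowest level, then VID0, then GDP1. */
            uint32_t crb = 3u | (1u << 3) | (4u << 6);
            int i;

            for (i = 0; i < NB_DEPTH; i++)
                    printf("%s%s", plane_id[(crb >> (3 * i)) & DEPTH_MASK],
                           i < NB_DEPTH - 1 ? " < " : "\n");
            return 0;
    }
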
+
+static void mixer_dbg_mxn(struct seq_file *s, void *addr)
+{
+	int i;
+
+	for (i = 1; i < 8; i++)
+		seq_printf(s, "-0x%08X", (int)readl(addr + i * 4));
+}
+
+static int mixer_dbg_show(struct seq_file *s, void *arg)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "%s: (vaddr = 0x%p)",
+		   sti_mixer_to_str(mixer), mixer->regs);
+
+	DBGFS_DUMP(GAM_MIXER_CTL);
+	mixer_dbg_ctl(s, sti_mixer_reg_read(mixer, GAM_MIXER_CTL));
+	DBGFS_DUMP(GAM_MIXER_BKC);
+	DBGFS_DUMP(GAM_MIXER_BCO);
+	DBGFS_DUMP(GAM_MIXER_BCS);
+	DBGFS_DUMP(GAM_MIXER_AVO);
+	DBGFS_DUMP(GAM_MIXER_AVS);
+	DBGFS_DUMP(GAM_MIXER_CRB);
+	mixer_dbg_crb(s, sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
+	DBGFS_DUMP(GAM_MIXER_ACT);
+	DBGFS_DUMP(GAM_MIXER_MBP);
+	DBGFS_DUMP(GAM_MIXER_MX0);
+	mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
+	seq_puts(s, "\n");
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list mixer0_debugfs_files[] = {
+	{ "mixer_main", mixer_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list mixer1_debugfs_files[] = {
+	{ "mixer_aux", mixer_dbg_show, 0, NULL },
+};
+
+static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+{
+	unsigned int i;
+	struct drm_info_list *mixer_debugfs_files;
+	int nb_files;
+
+	switch (mixer->id) {
+	case STI_MIXER_MAIN:
+		mixer_debugfs_files = mixer0_debugfs_files;
+		nb_files = ARRAY_SIZE(mixer0_debugfs_files);
+		break;
+	case STI_MIXER_AUX:
+		mixer_debugfs_files = mixer1_debugfs_files;
+		nb_files = ARRAY_SIZE(mixer1_debugfs_files);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (i = 0; i < nb_files; i++)
+		mixer_debugfs_files[i].data = mixer;
+
+	return drm_debugfs_create_files(mixer_debugfs_files,
+					nb_files,
+					minor->debugfs_root, minor);
+}
+
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
 {
 	u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
@@ -237,7 +376,9 @@
 				    mixerColorSpaceMatIdentity[i]);
 }
 
-struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+struct sti_mixer *sti_mixer_create(struct device *dev,
+				   struct drm_device *drm_dev,
+				   int id,
 				   void __iomem *baseaddr)
 {
 	struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
@@ -258,5 +399,8 @@
 	DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
 			 sti_mixer_to_str(mixer), mixer->regs);
 
+	if (mixer_debugfs_init(mixer, drm_dev->primary))
+		DRM_ERROR("MIXER debugfs setup failed\n");
+
 	return mixer;
 }
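
The mixer hunks above establish the debugfs recipe that the HQVDP, TVOUT
and VID patches in this series repeat: a static drm_info_list table whose
.data slot is patched to point at the instance before registration, and a
*_dbg_show() callback that takes struct_mutex around the register dump.
A minimal sketch of the pattern against the 4.6-era debugfs API (sti_foo
and the "foo" names are placeholders, not driver symbols):

#include <drm/drmP.h>

struct sti_foo {
	void __iomem *regs;
};

static int foo_dbg_show(struct seq_file *s, void *arg)
{
	struct drm_info_node *node = s->private;
	struct sti_foo *foo = node->info_ent->data;	/* set in init below */
	struct drm_device *dev = node->minor->dev;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(s, "FOO: (vaddr = 0x%p)\n", foo->regs);

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static struct drm_info_list foo_debugfs_files[] = {
	{ "foo", foo_dbg_show, 0, NULL },	/* .data patched at init */
};

static int foo_debugfs_init(struct sti_foo *foo, struct drm_minor *minor)
{
	unsigned int i;

	/* drm_info_list carries no per-open cookie, so the instance
	 * pointer is stashed in .data before the table is registered */
	for (i = 0; i < ARRAY_SIZE(foo_debugfs_files); i++)
		foo_debugfs_files[i].data = foo;

	return drm_debugfs_create_files(foo_debugfs_files,
					ARRAY_SIZE(foo_debugfs_files),
					minor->debugfs_root, minor);
}

Every call site in the series treats a non-zero return as non-fatal:
a DRM_ERROR is logged and probing continues, since the files are
purely diagnostic.
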
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index efb1a9a..6f35fc0 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -42,7 +42,9 @@
 
 const char *sti_mixer_to_str(struct sti_mixer *mixer);
 
-struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+struct sti_mixer *sti_mixer_create(struct device *dev,
+				   struct drm_device *drm_dev,
+				   int id,
 				   void __iomem *baseaddr);
 
 int sti_mixer_set_plane_status(struct sti_mixer *mixer,
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index 2e5c751..f10c98d 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -43,6 +43,69 @@
 	}
 }
 
+#define STI_FPS_INTERVAL_MS     3000
+
+static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
+{
+	struct timespec tmp_ts = timespec_sub(lhs, rhs);
+	u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
+
+	do_div(tmp_ns, NSEC_PER_MSEC);
+
+	return (u32)tmp_ns;
+}
+
+void sti_plane_update_fps(struct sti_plane *plane,
+			  bool new_frame,
+			  bool new_field)
+{
+	struct timespec now;
+	struct sti_fps_info *fps;
+	int fpks, fipks, ms_since_last, num_frames, num_fields;
+
+	getrawmonotonic(&now);
+
+	/* Compute number of frame updates */
+	fps = &plane->fps_info;
+
+	if (new_field)
+		fps->curr_field_counter++;
+
+	/* do not perform the fps calculation if new_frame is false */
+	if (!new_frame)
+		return;
+
+	fps->curr_frame_counter++;
+	ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+	num_frames = fps->curr_frame_counter - fps->last_frame_counter;
+
+	if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
+		return;
+
+	fps->last_timestamp = now;
+	fps->last_frame_counter = fps->curr_frame_counter;
+	fpks = (num_frames * 1000000) / ms_since_last;
+	snprintf(plane->fps_info.fps_str, FPS_LENGTH, "%-6s @ %d.%.3d fps",
+		 sti_plane_to_str(plane), fpks / 1000, fpks % 1000);
+
+	if (fps->curr_field_counter) {
+		/* Compute number of field updates */
+		num_fields = fps->curr_field_counter - fps->last_field_counter;
+		fps->last_field_counter = fps->curr_field_counter;
+		fipks = (num_fields * 1000000) / ms_since_last;
+		snprintf(plane->fps_info.fips_str,
+			 FPS_LENGTH, " - %d.%.3d field/sec",
+			 fipks / 1000, fipks % 1000);
+	} else {
+		plane->fps_info.fips_str[0] = '\0';
+	}
+
+	if (fps->output)
+		DRM_INFO("%s%s\n",
+			 plane->fps_info.fps_str,
+			 plane->fps_info.fips_str);
+}
+
 static void sti_plane_destroy(struct drm_plane *drm_plane)
 {
 	DRM_DEBUG_DRIVER("\n");
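
sti_plane_update_fps() avoids floating point by carrying the rate as
"fpks", frames per kilosecond (fps scaled by 1000), which lets it print
three decimals with integer division only. A standalone sketch of that
arithmetic (print_rate() is a hypothetical helper, not driver code):

#include <stdio.h>

static void print_rate(const char *name, unsigned num_frames,
		       unsigned ms_since_last)
{
	unsigned fpks;

	if (!ms_since_last)
		return;

	/* frames / (ms / 1000) gives fps; multiplying by another
	 * 1000 first keeps three decimals in integer arithmetic */
	fpks = (num_frames * 1000000u) / ms_since_last;

	printf("%-6s @ %u.%.3u fps\n", name, fpks / 1000, fpks % 1000);
}

int main(void)
{
	print_rate("GDP0", 180, 3002);	/* prints "GDP0   @ 59.960 fps" */
	return 0;
}

The field-rate string ("fipks") is produced the same way, so interlaced
output reports both frames and fields per second from one timestamp.
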
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index 86f1e6f..c50a3b9 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -50,6 +50,18 @@
 	STI_PLANE_DISABLED,
 };
 
+#define FPS_LENGTH 64
+struct sti_fps_info {
+	bool output;
+	unsigned int curr_frame_counter;
+	unsigned int last_frame_counter;
+	unsigned int curr_field_counter;
+	unsigned int last_field_counter;
+	struct timespec last_timestamp;
+	char fps_str[FPS_LENGTH];
+	char fips_str[FPS_LENGTH];
+};
+
 /**
  * STI plane structure
  *
@@ -57,15 +69,20 @@
  * @desc:               plane type & id
  * @status:             to know the status of the plane
  * @zorder:             plane z-order
+ * @fps_info:           frames per second info
  */
 struct sti_plane {
 	struct drm_plane drm_plane;
 	enum sti_plane_desc desc;
 	enum sti_plane_status status;
 	int zorder;
+	struct sti_fps_info fps_info;
 };
 
 const char *sti_plane_to_str(struct sti_plane *plane);
+void sti_plane_update_fps(struct sti_plane *plane,
+			  bool new_frame,
+			  bool new_field);
 void sti_plane_init_property(struct sti_plane *plane,
 			     enum drm_plane_type type);
 #endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index f2afcf5..2c99016 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -17,6 +17,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "sti_crtc.h"
+#include "sti_vtg.h"
 
 /* glue registers */
 #define TVO_CSC_MAIN_M0                  0x000
@@ -85,19 +86,7 @@
 #define TVO_VIP_SEL_INPUT_BYPASSED       1
 
 #define TVO_SYNC_MAIN_VTG_SET_REF        0x00
-#define TVO_SYNC_MAIN_VTG_SET_1          0x01
-#define TVO_SYNC_MAIN_VTG_SET_2          0x02
-#define TVO_SYNC_MAIN_VTG_SET_3          0x03
-#define TVO_SYNC_MAIN_VTG_SET_4          0x04
-#define TVO_SYNC_MAIN_VTG_SET_5          0x05
-#define TVO_SYNC_MAIN_VTG_SET_6          0x06
 #define TVO_SYNC_AUX_VTG_SET_REF         0x10
-#define TVO_SYNC_AUX_VTG_SET_1           0x11
-#define TVO_SYNC_AUX_VTG_SET_2           0x12
-#define TVO_SYNC_AUX_VTG_SET_3           0x13
-#define TVO_SYNC_AUX_VTG_SET_4           0x14
-#define TVO_SYNC_AUX_VTG_SET_5           0x15
-#define TVO_SYNC_AUX_VTG_SET_6           0x16
 
 #define TVO_SYNC_HD_DCS_SHIFT            8
 
@@ -106,6 +95,8 @@
 
 #define ENCODER_CRTC_MASK                (BIT(0) | BIT(1))
 
+#define TVO_MIN_HD_HEIGHT                720
+
 /* enum listing the supported output data format */
 enum sti_tvout_video_out_type {
 	STI_TVOUT_VIDEO_OUT_RGB,
@@ -269,6 +260,31 @@
 }
 
 /**
+ * Set preformatter matrix
+ *
+ * @tvout: tvout structure
+ * @mode: display mode structure
+ */
+static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
+					  struct drm_display_mode *mode)
+{
+	unsigned int i;
+	const u32 *pf_matrix;
+
+	if (mode->vdisplay >= TVO_MIN_HD_HEIGHT)
+		pf_matrix = rgb_to_ycbcr_709;
+	else
+		pf_matrix = rgb_to_ycbcr_601;
+
+	for (i = 0; i < 8; i++) {
+		tvout_write(tvout, *(pf_matrix + i),
+			    TVO_CSC_MAIN_M0 + (i * 4));
+		tvout_write(tvout, *(pf_matrix + i),
+			    TVO_CSC_AUX_M0 + (i * 4));
+	}
+}
+
+/**
  * Start VIP block for DVO output
  *
 * @tvout: pointer to tvout structure
@@ -280,24 +296,26 @@
 	struct device_node *node = tvout->dev->of_node;
 	bool sel_input_logic_inverted = false;
 	u32 tvo_in_vid_format;
-	int val;
+	int val, tmp;
 
 	dev_dbg(tvout->dev, "%s\n", __func__);
 
 	if (main_path) {
 		DRM_DEBUG_DRIVER("main vip for DVO\n");
-		/* Select the input sync for dvo = VTG set 4 */
-		val  = TVO_SYNC_MAIN_VTG_SET_4 << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
-		val |= TVO_SYNC_MAIN_VTG_SET_4 << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
-		val |= TVO_SYNC_MAIN_VTG_SET_4;
+		/* Select the input sync for dvo */
+		tmp = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_DVO;
+		val  = tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
+		val |= tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
+		val |= tmp;
 		tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
 		tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
 	} else {
 		DRM_DEBUG_DRIVER("aux vip for DVO\n");
-		/* Select the input sync for dvo = VTG set 4 */
-		val  = TVO_SYNC_AUX_VTG_SET_4 << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
-		val |= TVO_SYNC_AUX_VTG_SET_4 << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
-		val |= TVO_SYNC_AUX_VTG_SET_4;
+		/* Select the input sync for dvo */
+		tmp = TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_DVO;
+		val  = tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
+		val |= tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
+		val |= tmp;
 		tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
 		tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
 	}
@@ -308,9 +326,8 @@
 				  TVO_VIP_REORDER_Y_G_SEL,
 				  TVO_VIP_REORDER_CB_B_SEL);
 
-	/* Set clipping mode (Limited range RGB/Y) */
-	tvout_vip_set_clip_mode(tvout, TVO_VIP_DVO,
-				TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+	/* Set clipping mode */
+	tvout_vip_set_clip_mode(tvout, TVO_VIP_DVO, TVO_VIP_CLIP_DISABLED);
 
 	/* Set round mode (rounded to 8-bit per component) */
 	tvout_vip_set_rnd(tvout, TVO_VIP_DVO, TVO_VIP_RND_8BIT_ROUNDED);
@@ -345,13 +362,17 @@
 
 	if (main_path) {
 		DRM_DEBUG_DRIVER("main vip for hdmi\n");
-		/* select the input sync for hdmi = VTG set 1 */
-		tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+		/* select the input sync for hdmi */
+		tvout_write(tvout,
+			    TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDMI,
+			    TVO_HDMI_SYNC_SEL);
 		tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
 	} else {
 		DRM_DEBUG_DRIVER("aux vip for hdmi\n");
-		/* select the input sync for hdmi = VTG set 1 */
-		tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+		/* select the input sync for hdmi */
+		tvout_write(tvout,
+			    TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDMI,
+			    TVO_HDMI_SYNC_SEL);
 		tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
 	}
 
@@ -361,9 +382,8 @@
 				  TVO_VIP_REORDER_Y_G_SEL,
 				  TVO_VIP_REORDER_CB_B_SEL);
 
-	/* set clipping mode (Limited range RGB/Y) */
-	tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
-			TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+	/* set clipping mode */
+	tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI, TVO_VIP_CLIP_DISABLED);
 
 	/* set round mode (rounded to 8-bit per component) */
 	tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
@@ -397,13 +417,19 @@
 	dev_dbg(tvout->dev, "%s\n", __func__);
 
 	if (main_path) {
-		val = TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
-		val |= TVO_SYNC_MAIN_VTG_SET_3;
+		DRM_DEBUG_DRIVER("main vip for HDF\n");
+		/* Select the input sync for HD analog and HD DCS */
+		val  = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDDCS;
+		val  = val << TVO_SYNC_HD_DCS_SHIFT;
+		val |= TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDF;
 		tvout_write(tvout, val, TVO_HD_SYNC_SEL);
 		tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
 	} else {
-		val = TVO_SYNC_AUX_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
-		val |= TVO_SYNC_AUX_VTG_SET_3;
+		DRM_DEBUG_DRIVER("aux vip for HDF\n");
+		/* Select the input sync for HD analog and HD DCS */
+		val  = TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDDCS;
+		val  = val << TVO_SYNC_HD_DCS_SHIFT;
+		val |= TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDF;
 		tvout_write(tvout, val, TVO_HD_SYNC_SEL);
 		tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
 	}
@@ -414,8 +440,8 @@
 				  TVO_VIP_REORDER_Y_G_SEL,
 				  TVO_VIP_REORDER_CB_B_SEL);
 
-	/* set clipping mode (EAV/SAV clipping) */
-	tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_EAV_SAV);
+	/* set clipping mode */
+	tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_DISABLED);
 
 	/* set round mode (rounded to 10-bit per component) */
 	tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
@@ -436,15 +462,159 @@
 	tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
 }
 
-static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
+#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+				   readl(tvout->regs + reg))
+
+static void tvout_dbg_vip(struct seq_file *s, int val)
 {
+	int r, g, b, tmp, mask;
+	char *const reorder[] = {"Y_G", "Cb_B", "Cr_R"};
+	char *const clipping[] = {"No", "EAV/SAV", "Limited range RGB/Y",
+				  "Limited range Cb/Cr", "decided by register"};
+	char *const round[] = {"8-bit", "10-bit", "12-bit"};
+	char *const input_sel[] = {"Main (color matrix enabled)",
+				   "Main (color matrix by-passed)",
+				   "", "", "", "", "", "",
+				   "Aux (color matrix enabled)",
+				   "Aux (color matrix by-passed)",
+				   "", "", "", "", "", "Force value"};
+
+	seq_puts(s, "\t");
+	mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT;
+	r = (val & mask) >> TVO_VIP_REORDER_R_SHIFT;
+	mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT;
+	g = (val & mask) >> TVO_VIP_REORDER_G_SHIFT;
+	mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT;
+	b = (val & mask) >> TVO_VIP_REORDER_B_SHIFT;
+	seq_printf(s, "%-24s %s->%s %s->%s %s->%s\n", "Reorder:",
+		   reorder[r], reorder[TVO_VIP_REORDER_CR_R_SEL],
+		   reorder[g], reorder[TVO_VIP_REORDER_Y_G_SEL],
+		   reorder[b], reorder[TVO_VIP_REORDER_CB_B_SEL]);
+	seq_puts(s, "\t\t\t\t\t");
+	mask = TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT;
+	tmp = (val & mask) >> TVO_VIP_CLIP_SHIFT;
+	seq_printf(s, "%-24s %s\n", "Clipping:", clipping[tmp]);
+	seq_puts(s, "\t\t\t\t\t");
+	mask = TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT;
+	tmp = (val & mask) >> TVO_VIP_RND_SHIFT;
+	seq_printf(s, "%-24s input data rounded to %s per component\n",
+		   "Round:", round[tmp]);
+	seq_puts(s, "\t\t\t\t\t");
+	tmp = (val & TVO_VIP_SEL_INPUT_MASK);
+	seq_printf(s, "%-24s %s", "Input selection:", input_sel[tmp]);
 }
 
-static bool sti_tvout_encoder_mode_fixup(struct drm_encoder *encoder,
-				       const struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted_mode)
+static void tvout_dbg_hd_dac_cfg(struct seq_file *s, int val)
 {
-	return true;
+	seq_printf(s, "\t%-24s %s", "HD DAC:",
+		   val & 1 ? "disabled" : "enabled");
+}
+
+static int tvout_dbg_show(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
+
+	seq_puts(s, "\n\n  HDMI encoder: ");
+	crtc = tvout->hdmi->crtc;
+	if (crtc) {
+		seq_printf(s, "connected to %s path",
+			   sti_crtc_is_main(crtc) ? "main" : "aux");
+		DBGFS_DUMP(TVO_HDMI_SYNC_SEL);
+		DBGFS_DUMP(TVO_VIP_HDMI);
+		tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_HDMI));
+	} else {
+		seq_puts(s, "disabled");
+	}
+
+	seq_puts(s, "\n\n  DVO encoder: ");
+	crtc = tvout->dvo->crtc;
+	if (crtc) {
+		seq_printf(s, "connected to %s path",
+			   sti_crtc_is_main(crtc) ? "main" : "aux");
+		DBGFS_DUMP(TVO_DVO_SYNC_SEL);
+		DBGFS_DUMP(TVO_DVO_CONFIG);
+		DBGFS_DUMP(TVO_VIP_DVO);
+		tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_DVO));
+	} else {
+		seq_puts(s, "disabled");
+	}
+
+	seq_puts(s, "\n\n  HDA encoder: ");
+	crtc = tvout->hda->crtc;
+	if (crtc) {
+		seq_printf(s, "connected to %s path",
+			   sti_crtc_is_main(crtc) ? "main" : "aux");
+		DBGFS_DUMP(TVO_HD_SYNC_SEL);
+		DBGFS_DUMP(TVO_HD_DAC_CFG_OFF);
+		tvout_dbg_hd_dac_cfg(s,
+				     readl(tvout->regs + TVO_HD_DAC_CFG_OFF));
+		DBGFS_DUMP(TVO_VIP_HDF);
+		tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_HDF));
+	} else {
+		seq_puts(s, "disabled");
+	}
+
+	seq_puts(s, "\n\n  main path configuration");
+	DBGFS_DUMP(TVO_CSC_MAIN_M0);
+	DBGFS_DUMP(TVO_CSC_MAIN_M1);
+	DBGFS_DUMP(TVO_CSC_MAIN_M2);
+	DBGFS_DUMP(TVO_CSC_MAIN_M3);
+	DBGFS_DUMP(TVO_CSC_MAIN_M4);
+	DBGFS_DUMP(TVO_CSC_MAIN_M5);
+	DBGFS_DUMP(TVO_CSC_MAIN_M6);
+	DBGFS_DUMP(TVO_CSC_MAIN_M7);
+	DBGFS_DUMP(TVO_MAIN_IN_VID_FORMAT);
+
+	seq_puts(s, "\n\n  auxiliary path configuration");
+	DBGFS_DUMP(TVO_CSC_AUX_M0);
+	DBGFS_DUMP(TVO_CSC_AUX_M2);
+	DBGFS_DUMP(TVO_CSC_AUX_M3);
+	DBGFS_DUMP(TVO_CSC_AUX_M4);
+	DBGFS_DUMP(TVO_CSC_AUX_M5);
+	DBGFS_DUMP(TVO_CSC_AUX_M6);
+	DBGFS_DUMP(TVO_CSC_AUX_M7);
+	DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
+	seq_puts(s, "\n");
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list tvout_debugfs_files[] = {
+	{ "tvout", tvout_dbg_show, 0, NULL },
+};
+
+static void tvout_debugfs_exit(struct sti_tvout *tvout, struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(tvout_debugfs_files,
+				 ARRAY_SIZE(tvout_debugfs_files),
+				 minor);
+}
+
+static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
+		tvout_debugfs_files[i].data = tvout;
+
+	return drm_debugfs_create_files(tvout_debugfs_files,
+					ARRAY_SIZE(tvout_debugfs_files),
+					minor->debugfs_root, minor);
+}
+
+static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
 }
 
 static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
@@ -453,10 +623,6 @@
 {
 }
 
-static void sti_tvout_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
 static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct sti_tvout_encoder *sti_encoder = to_sti_tvout_encoder(encoder);
@@ -469,10 +635,12 @@
 	.destroy = sti_tvout_encoder_destroy,
 };
 
-static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
+static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
+	tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
 	tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
@@ -486,10 +654,8 @@
 
 static const struct drm_encoder_helper_funcs sti_dvo_encoder_helper_funcs = {
 	.dpms = sti_tvout_encoder_dpms,
-	.mode_fixup = sti_tvout_encoder_mode_fixup,
 	.mode_set = sti_tvout_encoder_mode_set,
-	.prepare = sti_tvout_encoder_prepare,
-	.commit = sti_dvo_encoder_commit,
+	.enable = sti_dvo_encoder_enable,
 	.disable = sti_dvo_encoder_disable,
 };
 
@@ -520,10 +686,12 @@
 	return drm_encoder;
 }
 
-static void sti_hda_encoder_commit(struct drm_encoder *encoder)
+static void sti_hda_encoder_enable(struct drm_encoder *encoder)
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
+	tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
 	tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
@@ -540,10 +708,8 @@
 
 static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = {
 	.dpms = sti_tvout_encoder_dpms,
-	.mode_fixup = sti_tvout_encoder_mode_fixup,
 	.mode_set = sti_tvout_encoder_mode_set,
-	.prepare = sti_tvout_encoder_prepare,
-	.commit = sti_hda_encoder_commit,
+	.commit = sti_hda_encoder_enable,
 	.disable = sti_hda_encoder_disable,
 };
 
@@ -572,10 +738,12 @@
 	return drm_encoder;
 }
 
-static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
+static void sti_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
+	tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
 	tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
@@ -589,10 +757,8 @@
 
 static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = {
 	.dpms = sti_tvout_encoder_dpms,
-	.mode_fixup = sti_tvout_encoder_mode_fixup,
 	.mode_set = sti_tvout_encoder_mode_set,
-	.prepare = sti_tvout_encoder_prepare,
-	.commit = sti_hdmi_encoder_commit,
+	.commit = sti_hdmi_encoder_enable,
 	.disable = sti_hdmi_encoder_disable,
 };
 
@@ -638,26 +804,24 @@
 	if (tvout->hda)
 		drm_encoder_cleanup(tvout->hda);
 	tvout->hda = NULL;
+
+	if (tvout->dvo)
+		drm_encoder_cleanup(tvout->dvo);
+	tvout->dvo = NULL;
 }
 
 static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
 {
 	struct sti_tvout *tvout = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
-	unsigned int i;
 
 	tvout->drm_dev = drm_dev;
 
-	/* set preformatter matrix */
-	for (i = 0; i < 8; i++) {
-		tvout_write(tvout, rgb_to_ycbcr_601[i],
-			TVO_CSC_MAIN_M0 + (i * 4));
-		tvout_write(tvout, rgb_to_ycbcr_601[i],
-			TVO_CSC_AUX_M0 + (i * 4));
-	}
-
 	sti_tvout_create_encoders(drm_dev, tvout);
 
+	if (tvout_debugfs_init(tvout, drm_dev->primary))
+		DRM_ERROR("TVOUT debugfs setup failed\n");
+
 	return 0;
 }
 
@@ -665,8 +829,11 @@
 	void *data)
 {
 	struct sti_tvout *tvout = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
 
 	sti_tvout_destroy_encoders(tvout);
+
+	tvout_debugfs_exit(tvout, drm_dev->primary);
 }
 
 static const struct component_ops sti_tvout_ops = {
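
Throughout the tvout changes, the hard-coded "VTG set N" constants give
way to a composed value: the main or aux reference base OR'ed with a
per-consumer sync id from sti_vtg.h, replicated for DVO into the pad
hsync/vsync fields. An illustrative sketch of that composition; the pad
shift values (8 and 16) are assumptions here, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

#define TVO_SYNC_MAIN_VTG_SET_REF	0x00
#define TVO_SYNC_AUX_VTG_SET_REF	0x10
#define VTG_SYNC_ID_DVO			4

/* assumed pad shift values, for illustration only */
#define TVO_SYNC_DVO_PAD_HSYNC_SHIFT	8
#define TVO_SYNC_DVO_PAD_VSYNC_SHIFT	16

static uint32_t dvo_sync_sel(int main_path)
{
	uint32_t tmp = (main_path ? TVO_SYNC_MAIN_VTG_SET_REF
				  : TVO_SYNC_AUX_VTG_SET_REF) | VTG_SYNC_ID_DVO;

	/* the same sync set drives the data path and both DVO pads */
	return tmp |
	       tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT |
	       tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
}

int main(void)
{
	printf("main: 0x%06x\n", (unsigned)dvo_sync_sel(1));	/* 0x040404 */
	printf("aux:  0x%06x\n", (unsigned)dvo_sync_sel(0));	/* 0x141414 */
	return 0;
}

The payoff is that the TVO glue no longer needs to know which VTG output
carries which signal; the mapping lives in one place, sti_vtg.h.
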
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index a8254cc..5a2c5dc 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -42,6 +42,104 @@
 #define VID_MPR1_BT709          0x0AC50000
 #define VID_MPR2_BT709          0x07150545
 #define VID_MPR3_BT709          0x00000AE8
+/* YCbCr to RGB BT601:
+ * R = Y+1.3711Cr
+ * G = Y-0.6992Cr-0.3359Cb
+ * B = Y+1.7344Cb
+ */
+#define VID_MPR0_BT601          0x0A800000
+#define VID_MPR1_BT601          0x0AAF0000
+#define VID_MPR2_BT601          0x094E0754
+#define VID_MPR3_BT601          0x00000ADD
+
+#define VID_MIN_HD_HEIGHT       720
+
+#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+				   readl(vid->regs + reg))
+
+static void vid_dbg_ctl(struct seq_file *s, int val)
+{
+	val = val >> 30;
+	seq_puts(s, "\t");
+
+	if (!(val & 1))
+		seq_puts(s, "NOT ");
+	seq_puts(s, "ignored on main mixer - ");
+
+	if (!(val & 2))
+		seq_puts(s, "NOT ");
+	seq_puts(s, "ignored on aux mixer");
+}
+
+static void vid_dbg_vpo(struct seq_file *s, int val)
+{
+	seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void vid_dbg_vps(struct seq_file *s, int val)
+{
+	seq_printf(s, "\txds:%4d\tyds:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void vid_dbg_mst(struct seq_file *s, int val)
+{
+	if (val & 1)
+		seq_puts(s, "\tBUFFER UNDERFLOW!");
+}
+
+static int vid_dbg_show(struct seq_file *s, void *arg)
+{
+	struct drm_info_node *node = s->private;
+	struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
+
+	DBGFS_DUMP(VID_CTL);
+	vid_dbg_ctl(s, readl(vid->regs + VID_CTL));
+	DBGFS_DUMP(VID_ALP);
+	DBGFS_DUMP(VID_CLF);
+	DBGFS_DUMP(VID_VPO);
+	vid_dbg_vpo(s, readl(vid->regs + VID_VPO));
+	DBGFS_DUMP(VID_VPS);
+	vid_dbg_vps(s, readl(vid->regs + VID_VPS));
+	DBGFS_DUMP(VID_KEY1);
+	DBGFS_DUMP(VID_KEY2);
+	DBGFS_DUMP(VID_MPR0);
+	DBGFS_DUMP(VID_MPR1);
+	DBGFS_DUMP(VID_MPR2);
+	DBGFS_DUMP(VID_MPR3);
+	DBGFS_DUMP(VID_MST);
+	vid_dbg_mst(s, readl(vid->regs + VID_MST));
+	DBGFS_DUMP(VID_BC);
+	DBGFS_DUMP(VID_TINT);
+	DBGFS_DUMP(VID_CSAT);
+	seq_puts(s, "\n");
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static struct drm_info_list vid_debugfs_files[] = {
+	{ "vid", vid_dbg_show, 0, NULL },
+};
+
+static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
+		vid_debugfs_files[i].data = vid;
+
+	return drm_debugfs_create_files(vid_debugfs_files,
+					ARRAY_SIZE(vid_debugfs_files),
+					minor->debugfs_root, minor);
+}
 
 void sti_vid_commit(struct sti_vid *vid,
 		    struct drm_plane_state *state)
@@ -52,6 +150,7 @@
 	int dst_y = state->crtc_y;
 	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
 	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	int src_h = state->src_h >> 16;
 	u32 val, ydo, xdo, yds, xds;
 
 	/* Input / output size
@@ -71,6 +170,19 @@
 
 	writel((ydo << 16) | xdo, vid->regs + VID_VPO);
 	writel((yds << 16) | xds, vid->regs + VID_VPS);
+
+	/* Color conversion parameters */
+	if (src_h >= VID_MIN_HD_HEIGHT) {
+		writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
+		writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
+		writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
+		writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
+	} else {
+		writel(VID_MPR0_BT601, vid->regs + VID_MPR0);
+		writel(VID_MPR1_BT601, vid->regs + VID_MPR1);
+		writel(VID_MPR2_BT601, vid->regs + VID_MPR2);
+		writel(VID_MPR3_BT601, vid->regs + VID_MPR3);
+	}
 }
 
 void sti_vid_disable(struct sti_vid *vid)
@@ -91,20 +203,14 @@
 	/* Opaque */
 	writel(VID_ALP_OPAQUE, vid->regs + VID_ALP);
 
-	/* Color conversion parameters */
-	writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
-	writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
-	writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
-	writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
-
 	/* Brightness, contrast, tint, saturation */
 	writel(VID_BC_DFLT, vid->regs + VID_BC);
 	writel(VID_TINT_DFLT, vid->regs + VID_TINT);
 	writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
 }
 
-struct sti_vid *sti_vid_create(struct device *dev, int id,
-			       void __iomem *baseaddr)
+struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
+			       int id, void __iomem *baseaddr)
 {
 	struct sti_vid *vid;
 
@@ -120,5 +226,8 @@
 
 	sti_vid_init(vid);
 
+	if (vid_debugfs_init(vid, drm_dev->primary))
+		DRM_ERROR("VID debugfs setup failed\n");
+
 	return vid;
 }
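
sti_vid_commit() now picks the YCbCr-to-RGB matrix per frame instead of
programming BT.709 once at init: sources of at least 720 lines are
treated as HD. A compilable sketch of the decision, reusing the register
values from the VID_MPRx defines above:

#include <stdint.h>
#include <stdio.h>

#define VID_MIN_HD_HEIGHT 720

struct csc { uint32_t mpr[4]; };

/* values copied from the VID_MPRx_BT709/BT601 defines above */
static const struct csc bt709 = {
	{ 0x0A800000, 0x0AC50000, 0x07150545, 0x00000AE8 }
};
static const struct csc bt601 = {
	{ 0x0A800000, 0x0AAF0000, 0x094E0754, 0x00000ADD }
};

static const struct csc *pick_csc(int src_h)
{
	/* HD sources get the BT.709 matrix, SD sources BT.601 */
	return src_h >= VID_MIN_HD_HEIGHT ? &bt709 : &bt601;
}

int main(void)
{
	/* the driver compares state->src_h >> 16, since DRM source
	 * coordinates are 16.16 fixed point */
	printf("1080 lines -> MPR1 0x%08X\n", (unsigned)pick_csc(1080)->mpr[1]);
	printf(" 576 lines -> MPR1 0x%08X\n", (unsigned)pick_csc(576)->mpr[1]);
	return 0;
}

Note the threshold tests the source height, not the display mode, so an
SD video upscaled onto an HD mode still gets BT.601 treatment.
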
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 5dea479..6c84234 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -23,7 +23,7 @@
 void sti_vid_commit(struct sti_vid *vid,
 		    struct drm_plane_state *state);
 void sti_vid_disable(struct sti_vid *vid);
-struct sti_vid *sti_vid_create(struct device *dev, int id,
-			       void __iomem *baseaddr);
+struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
+			       int id, void __iomem *baseaddr);
 
 #endif
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index d56630c..32c7986 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -15,8 +15,8 @@
 
 #include "sti_vtg.h"
 
-#define VTG_TYPE_MASTER         0
-#define VTG_TYPE_SLAVE_BY_EXT0  1
+#define VTG_MODE_MASTER         0
+#define VTG_MODE_SLAVE_BY_EXT0  1
 
 /* registers offset */
 #define VTG_MODE            0x0000
@@ -64,6 +64,9 @@
 /* Delay introduced by the HDMI in nb of pixel */
 #define HDMI_DELAY          (5)
 
+/* Delay introduced by the DVO in nb of pixels */
+#define DVO_DELAY           (2)
+
 /* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
 #define AWG_DELAY_HD        (-9)
 #define AWG_DELAY_ED        (-8)
@@ -71,13 +74,61 @@
 
 LIST_HEAD(vtg_lookup);
 
+/*
+ * STI VTG register offset structure
+ *
+ * @h_hd:     stores the VTG_H_HD_x     register offset
+ * @top_v_vd: stores the VTG_TOP_V_VD_x register offset
+ * @bot_v_vd: stores the VTG_BOT_V_VD_x register offset
+ * @top_v_hd: stores the VTG_TOP_V_HD_x register offset
+ * @bot_v_hd: stores the VTG_BOT_V_HD_x register offset
+ */
+struct sti_vtg_regs_offs {
+	u32 h_hd;
+	u32 top_v_vd;
+	u32 bot_v_vd;
+	u32 top_v_hd;
+	u32 bot_v_hd;
+};
+
+#define VTG_MAX_SYNC_OUTPUT 4
+static const struct sti_vtg_regs_offs vtg_regs_offs[VTG_MAX_SYNC_OUTPUT] = {
+	{ VTG_H_HD_1,
+	  VTG_TOP_V_VD_1, VTG_BOT_V_VD_1, VTG_TOP_V_HD_1, VTG_BOT_V_HD_1 },
+	{ VTG_H_HD_2,
+	  VTG_TOP_V_VD_2, VTG_BOT_V_VD_2, VTG_TOP_V_HD_2, VTG_BOT_V_HD_2 },
+	{ VTG_H_HD_3,
+	  VTG_TOP_V_VD_3, VTG_BOT_V_VD_3, VTG_TOP_V_HD_3, VTG_BOT_V_HD_3 },
+	{ VTG_H_HD_4,
+	  VTG_TOP_V_VD_4, VTG_BOT_V_VD_4, VTG_TOP_V_HD_4, VTG_BOT_V_HD_4 }
+};
+
+/*
+ * STI VTG synchronisation parameters structure
+ *
+ * @hsync: falling and rising edge sample numbers of the hsync signal
+ * @vsync_line_top: falling and rising edge line numbers of the top field vsync
+ * @vsync_line_bot: falling and rising edge line numbers of the bottom field vsync
+ * @vsync_off_top: rising and falling edge sample offsets of the top field vsync
+ * @vsync_off_bot: rising and falling edge sample offsets of the bottom field vsync
+ */
+struct sti_vtg_sync_params {
+	u32 hsync;
+	u32 vsync_line_top;
+	u32 vsync_line_bot;
+	u32 vsync_off_top;
+	u32 vsync_off_bot;
+};
+
 /**
  * STI VTG structure
  *
  * @dev: pointer to device driver
- * @data: data associated to the device
+ * @np: device node
+ * @regs: register mapping
+ * @sync_params: synchronisation parameters used to generate timings
  * @irq: VTG irq
- * @type: VTG type (main or aux)
+ * @irq_status: stores the IRQ status value
  * @notifier_list: notifier callback
  * @crtc: the CRTC for vblank event
  * @slave: slave vtg
@@ -87,6 +138,7 @@
 	struct device *dev;
 	struct device_node *np;
 	void __iomem *regs;
+	struct sti_vtg_sync_params sync_params[VTG_MAX_SYNC_OUTPUT];
 	int irq;
 	u32 irq_status;
 	struct raw_notifier_head notifier_list;
@@ -146,13 +198,69 @@
 	writel(video_bottom_field_stop, regs + VTG_VID_BFS);
 }
 
-static void vtg_set_mode(struct sti_vtg *vtg,
-			 int type, const struct drm_display_mode *mode)
+static void vtg_set_hsync_vsync_pos(struct sti_vtg_sync_params *sync,
+				    int delay,
+				    const struct drm_display_mode *mode)
 {
-	u32 tmp;
+	long clocksperline, start, stop;
+	u32 risesync_top, fallsync_top;
+	u32 risesync_offs_top, fallsync_offs_top;
+
+	clocksperline = mode->htotal;
+
+	/* Get the hsync position */
+	start = 0;
+	stop = mode->hsync_end - mode->hsync_start;
+
+	start += delay;
+	stop  += delay;
+
+	if (start < 0)
+		start += clocksperline;
+	else if (start >= clocksperline)
+		start -= clocksperline;
+
+	if (stop < 0)
+		stop += clocksperline;
+	else if (stop >= clocksperline)
+		stop -= clocksperline;
+
+	sync->hsync = (stop << 16) | start;
+
+	/* Get the vsync position */
+	if (delay >= 0) {
+		risesync_top = 1;
+		fallsync_top = risesync_top;
+		fallsync_top += mode->vsync_end - mode->vsync_start;
+
+		fallsync_offs_top = (u32)delay;
+		risesync_offs_top = (u32)delay;
+	} else {
+		risesync_top = mode->vtotal;
+		fallsync_top = mode->vsync_end - mode->vsync_start;
+
+		fallsync_offs_top = clocksperline + delay;
+		risesync_offs_top = clocksperline + delay;
+	}
+
+	sync->vsync_line_top = (fallsync_top << 16) | risesync_top;
+	sync->vsync_off_top = (fallsync_offs_top << 16) | risesync_offs_top;
+
+	/* Only progressive supported for now */
+	sync->vsync_line_bot = sync->vsync_line_top;
+	sync->vsync_off_bot = sync->vsync_off_top;
+}
+
+static void vtg_set_mode(struct sti_vtg *vtg,
+			 int type,
+			 struct sti_vtg_sync_params *sync,
+			 const struct drm_display_mode *mode)
+{
+	unsigned int i;
 
 	if (vtg->slave)
-		vtg_set_mode(vtg->slave, VTG_TYPE_SLAVE_BY_EXT0, mode);
+		vtg_set_mode(vtg->slave, VTG_MODE_SLAVE_BY_EXT0,
+			     vtg->sync_params, mode);
 
 	/* Set the number of clock cycles per line */
 	writel(mode->htotal, vtg->regs + VTG_CLKLN);
@@ -163,57 +271,31 @@
 	/* Program output window */
 	vtg_set_output_window(vtg->regs, mode);
 
-	/* prepare VTG set 1 for HDMI */
-	tmp = (mode->hsync_end - mode->hsync_start + HDMI_DELAY) << 16;
-	tmp |= HDMI_DELAY;
-	writel(tmp, vtg->regs + VTG_H_HD_1);
+	/* Set hsync and vsync position for HDMI */
+	vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDMI - 1], HDMI_DELAY, mode);
 
-	tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
-	tmp |= 1;
-	writel(tmp, vtg->regs + VTG_TOP_V_VD_1);
-	writel(tmp, vtg->regs + VTG_BOT_V_VD_1);
+	/* Set hsync and vsync position for HD DCS */
+	vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDDCS - 1], 0, mode);
 
-	tmp = HDMI_DELAY << 16;
-	tmp |= HDMI_DELAY;
-	writel(tmp, vtg->regs + VTG_TOP_V_HD_1);
-	writel(tmp, vtg->regs + VTG_BOT_V_HD_1);
+	/* Set hsync and vsync position for HDF */
+	vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDF - 1], AWG_DELAY_HD, mode);
 
-	/* prepare VTG set 2 for for HD DCS */
-	tmp = (mode->hsync_end - mode->hsync_start) << 16;
-	writel(tmp, vtg->regs + VTG_H_HD_2);
+	/* Set hsync and vsync position for DVO */
+	vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_DVO - 1], DVO_DELAY, mode);
 
-	tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
-	tmp |= 1;
-	writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
-	writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
-	writel(0, vtg->regs + VTG_TOP_V_HD_2);
-	writel(0, vtg->regs + VTG_BOT_V_HD_2);
-
-	/* prepare VTG set 3 for HD Analog in HD mode */
-	tmp = (mode->hsync_end - mode->hsync_start + AWG_DELAY_HD) << 16;
-	tmp |= mode->htotal + AWG_DELAY_HD;
-	writel(tmp, vtg->regs + VTG_H_HD_3);
-
-	tmp = (mode->vsync_end - mode->vsync_start) << 16;
-	tmp |= mode->vtotal;
-	writel(tmp, vtg->regs + VTG_TOP_V_VD_3);
-	writel(tmp, vtg->regs + VTG_BOT_V_VD_3);
-
-	tmp = (mode->htotal + AWG_DELAY_HD) << 16;
-	tmp |= mode->htotal + AWG_DELAY_HD;
-	writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
-	writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
-
-	/* Prepare VTG set 4 for DVO */
-	tmp = (mode->hsync_end - mode->hsync_start) << 16;
-	writel(tmp, vtg->regs + VTG_H_HD_4);
-
-	tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
-	tmp |= 1;
-	writel(tmp, vtg->regs + VTG_TOP_V_VD_4);
-	writel(tmp, vtg->regs + VTG_BOT_V_VD_4);
-	writel(0, vtg->regs + VTG_TOP_V_HD_4);
-	writel(0, vtg->regs + VTG_BOT_V_HD_4);
+	/* Program the sync outputs */
+	for (i = 0; i < VTG_MAX_SYNC_OUTPUT; i++) {
+		writel(sync[i].hsync,
+		       vtg->regs + vtg_regs_offs[i].h_hd);
+		writel(sync[i].vsync_line_top,
+		       vtg->regs + vtg_regs_offs[i].top_v_vd);
+		writel(sync[i].vsync_line_bot,
+		       vtg->regs + vtg_regs_offs[i].bot_v_vd);
+		writel(sync[i].vsync_off_top,
+		       vtg->regs + vtg_regs_offs[i].top_v_hd);
+		writel(sync[i].vsync_off_bot,
+		       vtg->regs + vtg_regs_offs[i].bot_v_hd);
+	}
 
 	/* mode */
 	writel(type, vtg->regs + VTG_MODE);
@@ -231,7 +313,7 @@
 		const struct drm_display_mode *mode)
 {
 	/* write configuration */
-	vtg_set_mode(vtg, VTG_TYPE_MASTER, mode);
+	vtg_set_mode(vtg, VTG_MODE_MASTER, vtg->sync_params, mode);
 
 	vtg_reset(vtg);
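
vtg_set_hsync_vsync_pos() shifts the sync window by each consumer's pixel
delay (HDMI +5, DVO +2, AWG -9 in HD) and wraps the edges back into a
single line of htotal pixels. The hsync half of that computation as a
standalone sketch, with illustrative 1080p CEA timing numbers:

#include <stdint.h>
#include <stdio.h>

static uint32_t hsync_word(int hsync_len, int htotal, int delay)
{
	long start = 0 + delay;		/* reference edge, then shifted */
	long stop = hsync_len + delay;

	if (start < 0)
		start += htotal;
	else if (start >= htotal)
		start -= htotal;

	if (stop < 0)
		stop += htotal;
	else if (stop >= htotal)
		stop -= htotal;

	/* stop edge in the high half-word, start edge in the low one */
	return (uint32_t)(stop << 16 | start);
}

int main(void)
{
	/* 1080p CEA timing: 44 px hsync pulse, 2200 px per line */
	printf("HDMI (+5): 0x%08X\n", (unsigned)hsync_word(44, 2200, 5));
	printf("HDF  (-9): 0x%08X\n", (unsigned)hsync_word(44, 2200, -9));
	return 0;
}

A negative delay wraps the start edge to the end of the previous line
(2191 in the HDF case here), which is also why the vsync computation has
a separate branch for negative delays.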
 
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
index cd2439f..f1dcdf9 100644
--- a/drivers/gpu/drm/sti/sti_vtg.h
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -10,6 +10,11 @@
 #define VTG_TOP_FIELD_EVENT     1
 #define VTG_BOTTOM_FIELD_EVENT  2
 
+#define VTG_SYNC_ID_HDMI        1
+#define VTG_SYNC_ID_HDDCS       2
+#define VTG_SYNC_ID_HDF         3
+#define VTG_SYNC_ID_DVO         4
+
 struct sti_vtg;
 struct drm_display_mode;
 struct notifier_block;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index dde6f20..fb2b4b0 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -988,23 +988,6 @@
 	spin_unlock_irqrestore(&drm->event_lock, flags);
 }
 
-void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-	struct drm_device *drm = crtc->dev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&drm->event_lock, flags);
-
-	if (dc->event && dc->event->base.file_priv == file) {
-		dc->event->base.destroy(&dc->event->base);
-		drm_crtc_vblank_put(crtc);
-		dc->event = NULL;
-	}
-
-	spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
 static void tegra_dc_destroy(struct drm_crtc *crtc)
 {
 	drm_crtc_cleanup(crtc);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index c5c856a..8e6b18c 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -858,10 +858,6 @@
 {
 	struct tegra_drm_file *fpriv = file->driver_priv;
 	struct tegra_drm_context *context, *tmp;
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-		tegra_dc_cancel_page_flip(crtc, file);
 
 	list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
 		tegra_drm_context_free(context);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index c088f2f..8a10f5b 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -195,7 +195,6 @@
 u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc);
 void tegra_dc_enable_vblank(struct tegra_dc *dc);
 void tegra_dc_disable_vblank(struct tegra_dc *dc);
-void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
 void tegra_dc_commit(struct tegra_dc *dc);
 int tegra_dc_state_setup_clock(struct tegra_dc *dc,
 			       struct drm_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 7d07733..051e5e1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -21,25 +21,31 @@
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
 
+#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
+
 struct tilcdc_crtc {
 	struct drm_crtc base;
 
 	const struct tilcdc_panel_info *info;
-	uint32_t dirty;
-	dma_addr_t start, end;
 	struct drm_pending_vblank_event *event;
 	int dpms;
 	wait_queue_head_t frame_done_wq;
 	bool frame_done;
+	spinlock_t irq_lock;
 
-	/* fb currently set to scanout 0/1: */
-	struct drm_framebuffer *scanout[2];
+	ktime_t last_vblank;
+
+	struct drm_framebuffer *curr_fb;
+	struct drm_framebuffer *next_fb;
 
 	/* for deferred fb unref's: */
 	struct drm_flip_work unref_work;
 
 	/* Only set if an external encoder is connected */
 	bool simulate_vesa_sync;
+
+	int sync_lost_count;
+	bool frame_intact;
 };
 #define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
 
@@ -54,79 +60,53 @@
 	mutex_unlock(&dev->mode_config.mutex);
 }
 
-static void set_scanout(struct drm_crtc *crtc, int n)
-{
-	static const uint32_t base_reg[] = {
-			LCDC_DMA_FB_BASE_ADDR_0_REG,
-			LCDC_DMA_FB_BASE_ADDR_1_REG,
-	};
-	static const uint32_t ceil_reg[] = {
-			LCDC_DMA_FB_CEILING_ADDR_0_REG,
-			LCDC_DMA_FB_CEILING_ADDR_1_REG,
-	};
-	static const uint32_t stat[] = {
-			LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1,
-	};
-	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	struct tilcdc_drm_private *priv = dev->dev_private;
-
-	pm_runtime_get_sync(dev->dev);
-	tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
-	tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
-	if (tilcdc_crtc->scanout[n]) {
-		drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
-		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-	}
-	tilcdc_crtc->scanout[n] = crtc->primary->fb;
-	drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
-	tilcdc_crtc->dirty &= ~stat[n];
-	pm_runtime_put_sync(dev->dev);
-}
-
-static void update_scanout(struct drm_crtc *crtc)
+static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_framebuffer *fb = crtc->primary->fb;
 	struct drm_gem_cma_object *gem;
 	unsigned int depth, bpp;
+	dma_addr_t start, end;
 
 	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
 	gem = drm_fb_cma_get_gem_obj(fb, 0);
 
-	tilcdc_crtc->start = gem->paddr + fb->offsets[0] +
-			(crtc->y * fb->pitches[0]) + (crtc->x * bpp/8);
+	start = gem->paddr + fb->offsets[0] +
+		crtc->y * fb->pitches[0] +
+		crtc->x * bpp / 8;
 
-	tilcdc_crtc->end = tilcdc_crtc->start +
-			(crtc->mode.vdisplay * fb->pitches[0]);
+	end = start + (crtc->mode.vdisplay * fb->pitches[0]);
 
-	if (tilcdc_crtc->dpms == DRM_MODE_DPMS_ON) {
-		/* already enabled, so just mark the frames that need
-		 * updating and they will be updated on vblank:
-		 */
-		tilcdc_crtc->dirty |= LCDC_END_OF_FRAME0 | LCDC_END_OF_FRAME1;
-		drm_vblank_get(dev, 0);
-	} else {
-		/* not enabled yet, so update registers immediately: */
-		set_scanout(crtc, 0);
-		set_scanout(crtc, 1);
-	}
+	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, start);
+	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG, end);
+
+	if (tilcdc_crtc->curr_fb)
+		drm_flip_work_queue(&tilcdc_crtc->unref_work,
+			tilcdc_crtc->curr_fb);
+
+	tilcdc_crtc->curr_fb = fb;
+}
+
+static void reset(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+
+	if (priv->rev != 2)
+		return;
+
+	tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+	usleep_range(250, 1000);
+	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
 }
 
 static void start(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct tilcdc_drm_private *priv = dev->dev_private;
 
-	if (priv->rev == 2) {
-		tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
-		msleep(1);
-		tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
-		msleep(1);
-	}
+	reset(crtc);
 
-	tilcdc_set(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
 	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
 	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
 }
@@ -138,17 +118,31 @@
 	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
 }
 
-static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode);
 static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 
 	tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 
+	of_node_put(crtc->port);
 	drm_crtc_cleanup(crtc);
 	drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
+}
 
-	kfree(tilcdc_crtc);
+static int tilcdc_verify_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	unsigned int depth, bpp;
+
+	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+
+	if (fb->pitches[0] != crtc->mode.hdisplay * bpp / 8) {
+		dev_err(dev->dev,
+			"Invalid pitch: fb and crtc widths must be the same");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
@@ -158,20 +152,48 @@
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
+	int r;
+	unsigned long flags;
+	s64 tdiff;
+	ktime_t next_vblank;
+
+	r = tilcdc_verify_fb(crtc, fb);
+	if (r)
+		return r;
 
 	if (tilcdc_crtc->event) {
 		dev_err(dev->dev, "already pending page flip!\n");
 		return -EBUSY;
 	}
 
+	drm_framebuffer_reference(fb);
+
 	crtc->primary->fb = fb;
+
+	pm_runtime_get_sync(dev->dev);
+
+	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+	next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+		1000000 / crtc->hwmode.vrefresh);
+
+	tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+
+	if (tdiff >= TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+		set_scanout(crtc, fb);
+	else
+		tilcdc_crtc->next_fb = fb;
+
 	tilcdc_crtc->event = event;
-	update_scanout(crtc);
+
+	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+
+	pm_runtime_put_sync(dev->dev);
 
 	return 0;
 }
 
-static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
+void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -186,10 +208,8 @@
 
 	tilcdc_crtc->dpms = mode;
 
-	pm_runtime_get_sync(dev->dev);
-
 	if (mode == DRM_MODE_DPMS_ON) {
-		pm_runtime_forbid(dev->dev);
+		pm_runtime_get_sync(dev->dev);
 		start(crtc);
 	} else {
 		tilcdc_crtc->frame_done = false;
@@ -207,10 +227,23 @@
 			if (ret == 0)
 				dev_err(dev->dev, "timeout waiting for framedone\n");
 		}
-		pm_runtime_allow(dev->dev);
-	}
 
-	pm_runtime_put_sync(dev->dev);
+		pm_runtime_put_sync(dev->dev);
+
+		if (tilcdc_crtc->next_fb) {
+			drm_flip_work_queue(&tilcdc_crtc->unref_work,
+					    tilcdc_crtc->next_fb);
+			tilcdc_crtc->next_fb = NULL;
+		}
+
+		if (tilcdc_crtc->curr_fb) {
+			drm_flip_work_queue(&tilcdc_crtc->unref_work,
+					    tilcdc_crtc->curr_fb);
+			tilcdc_crtc->curr_fb = NULL;
+		}
+
+		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
+	}
 }
 
 static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -272,6 +305,10 @@
 	if (WARN_ON(!info))
 		return -EINVAL;
 
+	ret = tilcdc_verify_fb(crtc, crtc->primary->fb);
+	if (ret)
+		return ret;
+
 	pm_runtime_get_sync(dev->dev);
 
 	/* Configure the Burst Size and fifo threshold of DMA: */
@@ -419,8 +456,10 @@
 	else
 		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
 
+	drm_framebuffer_reference(crtc->primary->fb);
 
-	update_scanout(crtc);
+	set_scanout(crtc, crtc->primary->fb);
+
 	tilcdc_crtc_update_clk(crtc);
 
 	pm_runtime_put_sync(dev->dev);
@@ -431,7 +470,21 @@
 static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 		struct drm_framebuffer *old_fb)
 {
-	update_scanout(crtc);
+	struct drm_device *dev = crtc->dev;
+	int r;
+
+	r = tilcdc_verify_fb(crtc, crtc->primary->fb);
+	if (r)
+		return r;
+
+	drm_framebuffer_reference(crtc->primary->fb);
+
+	pm_runtime_get_sync(dev->dev);
+
+	set_scanout(crtc, crtc->primary->fb);
+
+	pm_runtime_put_sync(dev->dev);
+
 	return 0;
 }
 
@@ -573,7 +626,8 @@
 	struct drm_device *dev = crtc->dev;
 	struct tilcdc_drm_private *priv = dev->dev_private;
 	int dpms = tilcdc_crtc->dpms;
-	unsigned int lcd_clk, div;
+	unsigned long lcd_clk;
+	const unsigned clkdiv = 2; /* using a fixed divider of 2 */
 	int ret;
 
 	pm_runtime_get_sync(dev->dev);
@@ -581,22 +635,21 @@
 	if (dpms == DRM_MODE_DPMS_ON)
 		tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 
-	/* in raster mode, minimum divisor is 2: */
-	ret = clk_set_rate(priv->disp_clk, crtc->mode.clock * 1000 * 2);
-	if (ret) {
+	/* mode.clock is in KHz, set_rate wants parameter in Hz */
+	ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
+	if (ret < 0) {
 		dev_err(dev->dev, "failed to set display clock rate to: %d\n",
 				crtc->mode.clock);
 		goto out;
 	}
 
 	lcd_clk = clk_get_rate(priv->clk);
-	div = lcd_clk / (crtc->mode.clock * 1000);
 
-	DBG("lcd_clk=%u, mode clock=%d, div=%u", lcd_clk, crtc->mode.clock, div);
-	DBG("fck=%lu, dpll_disp_ck=%lu", clk_get_rate(priv->clk), clk_get_rate(priv->disp_clk));
+	DBG("lcd_clk=%lu, mode clock=%d, div=%u",
+		lcd_clk, crtc->mode.clock, clkdiv);
 
 	/* Configure the LCD clock divisor. */
-	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(div) |
+	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
 			LCDC_RASTER_MODE);
 
 	if (priv->rev == 2)
@@ -611,44 +664,58 @@
 	pm_runtime_put_sync(dev->dev);
 }
 
+#define SYNC_LOST_COUNT_LIMIT 50
+
 irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct tilcdc_drm_private *priv = dev->dev_private;
-	uint32_t stat = tilcdc_read_irqstatus(dev);
+	uint32_t stat;
 
-	if ((stat & LCDC_SYNC_LOST) && (stat & LCDC_FIFO_UNDERFLOW)) {
-		stop(crtc);
-		dev_err(dev->dev, "error: %08x\n", stat);
-		tilcdc_clear_irqstatus(dev, stat);
-		start(crtc);
-	} else if (stat & LCDC_PL_LOAD_DONE) {
-		tilcdc_clear_irqstatus(dev, stat);
-	} else {
-		struct drm_pending_vblank_event *event;
+	stat = tilcdc_read_irqstatus(dev);
+	tilcdc_clear_irqstatus(dev, stat);
+
+	if (stat & LCDC_END_OF_FRAME0) {
 		unsigned long flags;
-		uint32_t dirty = tilcdc_crtc->dirty & stat;
+		bool skip_event = false;
+		ktime_t now;
 
-		tilcdc_clear_irqstatus(dev, stat);
+		now = ktime_get();
 
-		if (dirty & LCDC_END_OF_FRAME0)
-			set_scanout(crtc, 0);
+		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
 
-		if (dirty & LCDC_END_OF_FRAME1)
-			set_scanout(crtc, 1);
+		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+		tilcdc_crtc->last_vblank = now;
+
+		if (tilcdc_crtc->next_fb) {
+			set_scanout(crtc, tilcdc_crtc->next_fb);
+			tilcdc_crtc->next_fb = NULL;
+			skip_event = true;
+		}
+
+		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
 		drm_handle_vblank(dev, 0);
 
-		spin_lock_irqsave(&dev->event_lock, flags);
-		event = tilcdc_crtc->event;
-		tilcdc_crtc->event = NULL;
-		if (event)
-			drm_send_vblank_event(dev, 0, event);
-		spin_unlock_irqrestore(&dev->event_lock, flags);
+		if (!skip_event) {
+			struct drm_pending_vblank_event *event;
 
-		if (dirty && !tilcdc_crtc->dirty)
-			drm_vblank_put(dev, 0);
+			spin_lock_irqsave(&dev->event_lock, flags);
+
+			event = tilcdc_crtc->event;
+			tilcdc_crtc->event = NULL;
+			if (event)
+				drm_send_vblank_event(dev, 0, event);
+
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+		}
+
+		if (tilcdc_crtc->frame_intact)
+			tilcdc_crtc->sync_lost_count = 0;
+		else
+			tilcdc_crtc->frame_intact = true;
 	}
 
 	if (priv->rev == 2) {
@@ -659,36 +726,34 @@
 		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
 	}
 
-	return IRQ_HANDLED;
-}
-
-void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
-	struct drm_pending_vblank_event *event;
-	struct drm_device *dev = crtc->dev;
-	unsigned long flags;
-
-	/* Destroy the pending vertical blanking event associated with the
-	 * pending page flip, if any, and disable vertical blanking interrupts.
-	 */
-	spin_lock_irqsave(&dev->event_lock, flags);
-	event = tilcdc_crtc->event;
-	if (event && event->base.file_priv == file) {
-		tilcdc_crtc->event = NULL;
-		event->base.destroy(&event->base);
-		drm_vblank_put(dev, 0);
+	if (stat & LCDC_SYNC_LOST) {
+		dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
+				    __func__, stat);
+		tilcdc_crtc->frame_intact = false;
+		if (tilcdc_crtc->sync_lost_count++ > SYNC_LOST_COUNT_LIMIT) {
+			dev_err(dev->dev,
+				"%s(0x%08x): Sync lost flood detected, disabling the interrupt",
+				__func__, stat);
+			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+				     LCDC_SYNC_LOST);
+		}
 	}
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (stat & LCDC_FIFO_UNDERFLOW)
+		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
+				    __func__, stat);
+
+	return IRQ_HANDLED;
 }
 
 struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
 {
+	struct tilcdc_drm_private *priv = dev->dev_private;
 	struct tilcdc_crtc *tilcdc_crtc;
 	struct drm_crtc *crtc;
 	int ret;
 
-	tilcdc_crtc = kzalloc(sizeof(*tilcdc_crtc), GFP_KERNEL);
+	tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
 	if (!tilcdc_crtc) {
 		dev_err(dev->dev, "allocation failed\n");
 		return NULL;
@@ -702,12 +767,32 @@
 	drm_flip_work_init(&tilcdc_crtc->unref_work,
 			"unref", unref_worker);
 
+	spin_lock_init(&tilcdc_crtc->irq_lock);
+
 	ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
 	if (ret < 0)
 		goto fail;
 
 	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
 
+	if (priv->is_componentized) {
+		struct device_node *ports =
+			of_get_child_by_name(dev->dev->of_node, "ports");
+
+		if (ports) {
+			crtc->port = of_get_child_by_name(ports, "port");
+			of_node_put(ports);
+		} else {
+			crtc->port =
+				of_get_child_by_name(dev->dev->of_node, "port");
+		}
+		if (!crtc->port) { /* This should never happen */
+			dev_err(dev->dev, "Port node not found in %s\n",
+				dev->dev->of_node->full_name);
+			goto fail;
+		}
+	}
+
 	return crtc;
 
 fail:
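
The rewritten page flip replaces the old dirty-flag machinery with a
timing decision: if the next vblank is at least
TILCDC_VBLANK_SAFETY_THRESHOLD_US away, the DMA base registers are
updated immediately; otherwise the new framebuffer is parked in next_fb
and latched from the end-of-frame IRQ. A sketch of just that decision,
with plain microsecond integers standing in for ktime_t:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VBLANK_SAFETY_THRESHOLD_US 1000

static bool flip_now(int64_t last_vblank_us, int64_t now_us, int vrefresh)
{
	/* one frame period after the last recorded vblank */
	int64_t next_vblank_us = last_vblank_us + 1000000 / vrefresh;

	return next_vblank_us - now_us >= VBLANK_SAFETY_THRESHOLD_US;
}

int main(void)
{
	/* 60 Hz: frame period is about 16667 us */
	printf("5 ms in:  %s\n", flip_now(0,  5000, 60) ? "now" : "defer");
	printf("16 ms in: %s\n", flip_now(0, 16000, 60) ? "now" : "defer");
	return 0;
}

The threshold guards against racing the scanout engine: a write that
lands while the controller is fetching the frame base could tear, so
anything closer than 1 ms to the vblank is deferred to the IRQ path.
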
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index d7f5b89..709bc90 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -18,6 +18,8 @@
 /* LCDC DRM driver, based on da8xx-fb */
 
 #include <linux/component.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/suspend.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
@@ -110,6 +112,8 @@
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
 
+	tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
+
 	tilcdc_remove_external_encoders(dev);
 
 	drm_fbdev_cma_fini(priv->fbdev);
@@ -139,11 +143,11 @@
 
 	pm_runtime_disable(dev->dev);
 
-	kfree(priv);
-
 	return 0;
 }
 
+static size_t tilcdc_num_regs(void);
+
 static int tilcdc_load(struct drm_device *dev, unsigned long flags)
 {
 	struct platform_device *pdev = dev->platformdev;
@@ -154,8 +158,12 @@
 	u32 bpp = 0;
 	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
+	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (priv)
+		priv->saved_register =
+			devm_kcalloc(dev->dev, tilcdc_num_regs(),
+				     sizeof(*priv->saved_register), GFP_KERNEL);
+	if (!priv || !priv->saved_register) {
 		dev_err(dev->dev, "failed to allocate private data\n");
 		return -ENOMEM;
 	}
@@ -168,7 +176,7 @@
 	priv->wq = alloc_ordered_workqueue("tilcdc", 0);
 	if (!priv->wq) {
 		ret = -ENOMEM;
-		goto fail_free_priv;
+		goto fail_unset_priv;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -192,13 +200,6 @@
 		goto fail_iounmap;
 	}
 
-	priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
-	if (IS_ERR(priv->clk)) {
-		dev_err(dev->dev, "failed to get display clock\n");
-		ret = -ENODEV;
-		goto fail_put_clk;
-	}
-
 #ifdef CONFIG_CPU_FREQ
 	priv->lcd_fck_rate = clk_get_rate(priv->clk);
 	priv->freq_transition.notifier_call = cpufreq_transition;
@@ -206,7 +207,7 @@
 			CPUFREQ_TRANSITION_NOTIFIER);
 	if (ret) {
 		dev_err(dev->dev, "failed to register cpufreq notifier\n");
-		goto fail_put_disp_clk;
+		goto fail_put_clk;
 	}
 #endif
 
@@ -227,7 +228,6 @@
 	DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
 
 	pm_runtime_enable(dev->dev);
-	pm_runtime_irq_safe(dev->dev);
 
 	/* Determine LCD IP Version */
 	pm_runtime_get_sync(dev->dev);
@@ -330,11 +330,9 @@
 #ifdef CONFIG_CPU_FREQ
 	cpufreq_unregister_notifier(&priv->freq_transition,
 			CPUFREQ_TRANSITION_NOTIFIER);
-fail_put_disp_clk:
-	clk_put(priv->disp_clk);
-#endif
 
 fail_put_clk:
+#endif
 	clk_put(priv->clk);
 
 fail_iounmap:
@@ -344,19 +342,12 @@
 	flush_workqueue(priv->wq);
 	destroy_workqueue(priv->wq);
 
-fail_free_priv:
+fail_unset_priv:
 	dev->dev_private = NULL;
-	kfree(priv);
+
 	return ret;
 }
 
-static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct tilcdc_drm_private *priv = dev->dev_private;
-
-	tilcdc_crtc_cancel_page_flip(priv->crtc, file);
-}
-
 static void tilcdc_lastclose(struct drm_device *dev)
 {
 	struct tilcdc_drm_private *priv = dev->dev_private;
@@ -380,10 +371,14 @@
 	struct tilcdc_drm_private *priv = dev->dev_private;
 
 	/* enable FIFO underflow irq: */
-	if (priv->rev == 1)
+	if (priv->rev == 1) {
 		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
-	else
-		tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
+	} else {
+		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
+			   LCDC_V2_UNDERFLOW_INT_ENA |
+			   LCDC_V2_END_OF_FRAME0_INT_ENA |
+			   LCDC_FRAME_DONE | LCDC_SYNC_LOST);
+	}
 
 	return 0;
 }
@@ -398,43 +393,21 @@
 				LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
 		tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
 	} else {
-		tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
 			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
-			LCDC_V2_END_OF_FRAME0_INT_ENA | LCDC_V2_END_OF_FRAME1_INT_ENA |
-			LCDC_FRAME_DONE);
+			LCDC_V2_END_OF_FRAME0_INT_ENA |
+			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
 	}
-
-}
-
-static void enable_vblank(struct drm_device *dev, bool enable)
-{
-	struct tilcdc_drm_private *priv = dev->dev_private;
-	u32 reg, mask;
-
-	if (priv->rev == 1) {
-		reg = LCDC_DMA_CTRL_REG;
-		mask = LCDC_V1_END_OF_FRAME_INT_ENA;
-	} else {
-		reg = LCDC_INT_ENABLE_SET_REG;
-		mask = LCDC_V2_END_OF_FRAME0_INT_ENA |
-			LCDC_V2_END_OF_FRAME1_INT_ENA | LCDC_FRAME_DONE;
-	}
-
-	if (enable)
-		tilcdc_set(dev, reg, mask);
-	else
-		tilcdc_clear(dev, reg, mask);
 }
 
 static int tilcdc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-	enable_vblank(dev, true);
 	return 0;
 }
 
 static void tilcdc_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-	enable_vblank(dev, false);
+	return;
 }
 
 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
@@ -461,13 +434,22 @@
 		/* new in revision 2: */
 		REG(2, false, LCDC_RAW_STAT_REG),
 		REG(2, false, LCDC_MASKED_STAT_REG),
-		REG(2, false, LCDC_INT_ENABLE_SET_REG),
+		REG(2, true, LCDC_INT_ENABLE_SET_REG),
 		REG(2, false, LCDC_INT_ENABLE_CLR_REG),
 		REG(2, false, LCDC_END_OF_INT_IND_REG),
 		REG(2, true,  LCDC_CLK_ENABLE_REG),
-		REG(2, true,  LCDC_INT_ENABLE_SET_REG),
 #undef REG
 };
+
+static size_t tilcdc_num_regs(void)
+{
+	return ARRAY_SIZE(registers);
+}
+#else
+static size_t tilcdc_num_regs(void)
+{
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -554,10 +536,10 @@
 };
 
 static struct drm_driver tilcdc_driver = {
-	.driver_features    = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.driver_features    = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
+			       DRIVER_PRIME),
 	.load               = tilcdc_load,
 	.unload             = tilcdc_unload,
-	.preclose           = tilcdc_preclose,
 	.lastclose          = tilcdc_lastclose,
 	.set_busid          = drm_platform_set_busid,
 	.irq_handler        = tilcdc_irq,
@@ -572,6 +554,16 @@
 	.dumb_create        = drm_gem_cma_dumb_create,
 	.dumb_map_offset    = drm_gem_cma_dumb_map_offset,
 	.dumb_destroy       = drm_gem_dumb_destroy,
+
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init       = tilcdc_debugfs_init,
 	.debugfs_cleanup    = tilcdc_debugfs_cleanup,
@@ -597,11 +589,24 @@
 
 	drm_kms_helper_poll_disable(ddev);
 
+	/* Select sleep pin state */
+	pinctrl_pm_select_sleep_state(dev);
+
+	if (pm_runtime_suspended(dev)) {
+		priv->ctx_valid = false;
+		return 0;
+	}
+
+	/* Disable the LCDC to avoid locking up the PRCM */
+	tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
+
 	/* Save register state: */
 	for (i = 0; i < ARRAY_SIZE(registers); i++)
 		if (registers[i].save && (priv->rev >= registers[i].rev))
 			priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
 
+	priv->ctx_valid = true;
+
 	return 0;
 }
 
@@ -611,10 +616,17 @@
 	struct tilcdc_drm_private *priv = ddev->dev_private;
 	unsigned i, n = 0;
 
-	/* Restore register state: */
-	for (i = 0; i < ARRAY_SIZE(registers); i++)
-		if (registers[i].save && (priv->rev >= registers[i].rev))
-			tilcdc_write(ddev, registers[i].reg, priv->saved_register[n++]);
+	/* Select default pin state */
+	pinctrl_pm_select_default_state(dev);
+
+	if (priv->ctx_valid) {
+		/* Restore register state: */
+		for (i = 0; i < ARRAY_SIZE(registers); i++)
+			if (registers[i].save &&
+			    (priv->rev >= registers[i].rev))
+				tilcdc_write(ddev, registers[i].reg,
+					     priv->saved_register[n++]);
+	}
 
 	drm_kms_helper_poll_enable(ddev);
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index e863ad0..c1de18b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -49,7 +49,6 @@
 struct tilcdc_drm_private {
 	void __iomem *mmio;
 
-	struct clk *disp_clk;    /* display dpll */
 	struct clk *clk;         /* functional clock */
 	int rev;                 /* IP revision */
 
@@ -67,7 +66,8 @@
 	uint32_t max_width;
 
 	/* register contents saved across suspend/resume: */
-	u32 saved_register[12];
+	u32 *saved_register;
+	bool ctx_valid;
 
 #ifdef CONFIG_CPU_FREQ
 	struct notifier_block freq_transition;
@@ -163,7 +163,6 @@
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
 struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
-void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
 irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
 void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
 void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
@@ -172,5 +171,6 @@
 					bool simulate_vesa_sync);
 int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
 int tilcdc_crtc_max_width(struct drm_crtc *crtc);
+void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode);
 
 #endif /* __TILCDC_DRV_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 4dda6e2..ff7774c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -45,14 +45,6 @@
 };
 #define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
 
-
-static void panel_encoder_destroy(struct drm_encoder *encoder)
-{
-	struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
-	drm_encoder_cleanup(encoder);
-	kfree(panel_encoder);
-}
-
 static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
@@ -70,14 +62,6 @@
 					 mode == DRM_MODE_DPMS_ON ? 1 : 0);
 }
 
-static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	/* nothing needed */
-	return true;
-}
-
 static void panel_encoder_prepare(struct drm_encoder *encoder)
 {
 	struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
@@ -98,12 +82,11 @@
 }
 
 static const struct drm_encoder_funcs panel_encoder_funcs = {
-		.destroy        = panel_encoder_destroy,
+		.destroy        = drm_encoder_cleanup,
 };
 
 static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
 		.dpms           = panel_encoder_dpms,
-		.mode_fixup     = panel_encoder_mode_fixup,
 		.prepare        = panel_encoder_prepare,
 		.commit         = panel_encoder_commit,
 		.mode_set       = panel_encoder_mode_set,
@@ -116,7 +99,8 @@
 	struct drm_encoder *encoder;
 	int ret;
 
-	panel_encoder = kzalloc(sizeof(*panel_encoder), GFP_KERNEL);
+	panel_encoder = devm_kzalloc(dev->dev, sizeof(*panel_encoder),
+				     GFP_KERNEL);
 	if (!panel_encoder) {
 		dev_err(dev->dev, "allocation failed\n");
 		return NULL;
@@ -137,7 +121,7 @@
 	return encoder;
 
 fail:
-	panel_encoder_destroy(encoder);
+	drm_encoder_cleanup(encoder);
 	return NULL;
 }
 
@@ -156,10 +140,8 @@
 
 static void panel_connector_destroy(struct drm_connector *connector)
 {
-	struct panel_connector *panel_connector = to_panel_connector(connector);
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
-	kfree(panel_connector);
 }
 
 static enum drm_connector_status panel_connector_detect(
@@ -232,7 +214,8 @@
 	struct drm_connector *connector;
 	int ret;
 
-	panel_connector = kzalloc(sizeof(*panel_connector), GFP_KERNEL);
+	panel_connector = devm_kzalloc(dev->dev, sizeof(*panel_connector),
+				       GFP_KERNEL);
 	if (!panel_connector) {
 		dev_err(dev->dev, "allocation failed\n");
 		return NULL;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 5052a8a..7716f42 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -54,14 +54,6 @@
 };
 #define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
 
-
-static void tfp410_encoder_destroy(struct drm_encoder *encoder)
-{
-	struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
-	drm_encoder_cleanup(encoder);
-	kfree(tfp410_encoder);
-}
-
 static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
@@ -80,14 +72,6 @@
 	tfp410_encoder->dpms = mode;
 }
 
-static bool tfp410_encoder_mode_fixup(struct drm_encoder *encoder,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	/* nothing needed */
-	return true;
-}
-
 static void tfp410_encoder_prepare(struct drm_encoder *encoder)
 {
 	tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -107,12 +91,11 @@
 }
 
 static const struct drm_encoder_funcs tfp410_encoder_funcs = {
-		.destroy        = tfp410_encoder_destroy,
+		.destroy        = drm_encoder_cleanup,
 };
 
 static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
 		.dpms           = tfp410_encoder_dpms,
-		.mode_fixup     = tfp410_encoder_mode_fixup,
 		.prepare        = tfp410_encoder_prepare,
 		.commit         = tfp410_encoder_commit,
 		.mode_set       = tfp410_encoder_mode_set,
@@ -125,7 +108,8 @@
 	struct drm_encoder *encoder;
 	int ret;
 
-	tfp410_encoder = kzalloc(sizeof(*tfp410_encoder), GFP_KERNEL);
+	tfp410_encoder = devm_kzalloc(dev->dev, sizeof(*tfp410_encoder),
+				      GFP_KERNEL);
 	if (!tfp410_encoder) {
 		dev_err(dev->dev, "allocation failed\n");
 		return NULL;
@@ -147,7 +131,7 @@
 	return encoder;
 
 fail:
-	tfp410_encoder_destroy(encoder);
+	drm_encoder_cleanup(encoder);
 	return NULL;
 }
 
@@ -166,10 +150,8 @@
 
 static void tfp410_connector_destroy(struct drm_connector *connector)
 {
-	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
-	kfree(tfp410_connector);
 }
 
 static enum drm_connector_status tfp410_connector_detect(
@@ -237,7 +219,8 @@
 	struct drm_connector *connector;
 	int ret;
 
-	tfp410_connector = kzalloc(sizeof(*tfp410_connector), GFP_KERNEL);
+	tfp410_connector = devm_kzalloc(dev->dev, sizeof(*tfp410_connector),
+					GFP_KERNEL);
 	if (!tfp410_connector) {
 		dev_err(dev->dev, "allocation failed\n");
 		return NULL;
@@ -322,7 +305,7 @@
 		return -ENXIO;
 	}
 
-	tfp410_mod = kzalloc(sizeof(*tfp410_mod), GFP_KERNEL);
+	tfp410_mod = devm_kzalloc(&pdev->dev, sizeof(*tfp410_mod), GFP_KERNEL);
 	if (!tfp410_mod)
 		return -ENOMEM;
 
@@ -375,7 +358,6 @@
 	i2c_put_adapter(tfp410_mod->i2c);
 
 fail:
-	kfree(tfp410_mod);
 	tilcdc_module_cleanup(mod);
 	return ret;
 }
@@ -389,7 +371,6 @@
 	gpio_free(tfp410_mod->gpio);
 
 	tilcdc_module_cleanup(mod);
-	kfree(tfp410_mod);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index d5728ec..772ec9e 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -125,17 +125,5 @@
 	.disconnect = udl_usb_disconnect,
 	.id_table = id_table,
 };
-
-static int __init udl_init(void)
-{
-	return usb_register(&udl_driver);
-}
-
-static void __exit udl_exit(void)
-{
-	usb_deregister(&udl_driver);
-}
-
-module_init(udl_init);
-module_exit(udl_exit);
+module_usb_driver(udl_driver);
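+/* module_usb_driver() expands to the same usb_register() /
+ * usb_deregister() module_init/module_exit boilerplate removed above.
+ */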
 MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index a181a64..59a4b34 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -26,13 +26,6 @@
 {
 }
 
-static bool udl_mode_fixup(struct drm_encoder *encoder,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void udl_encoder_prepare(struct drm_encoder *encoder)
 {
 }
@@ -54,7 +47,6 @@
 
 static const struct drm_encoder_helper_funcs udl_helper_funcs = {
 	.dpms = udl_encoder_dpms,
-	.mode_fixup = udl_mode_fixup,
 	.prepare = udl_encoder_prepare,
 	.mode_set = udl_encoder_mode_set,
 	.commit = udl_encoder_commit,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 200419d..c427499 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -409,7 +409,6 @@
 
 	if (ufb->obj->base.import_attach) {
 		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
-					       0, ufb->obj->base.size,
 					       DMA_FROM_DEVICE);
 		if (ret)
 			goto unlock;
@@ -425,7 +424,6 @@
 
 	if (ufb->obj->base.import_attach) {
 		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
-				       0, ufb->obj->base.size,
 				       DMA_FROM_DEVICE);
 	}
 
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 160ef2a..b87afee 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -279,14 +279,6 @@
 
 }
 
-static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-
-{
-	return true;
-}
-
 #if 0
 static int
 udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -402,7 +394,6 @@
 
 static const struct drm_crtc_helper_funcs udl_helper_funcs = {
 	.dpms = udl_crtc_dpms,
-	.mode_fixup = udl_crtc_mode_fixup,
 	.mode_set = udl_crtc_mode_set,
 	.prepare = udl_crtc_prepare,
 	.commit = udl_crtc_commit,
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 034ef2d..9807bc9 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -498,11 +498,12 @@
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	ret = copy_from_user(bo->base.vaddr,
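+	/* copy_from_user() returns the number of bytes left uncopied,
+	 * not an errno, so map any shortfall to -EFAULT.
+	 */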
+	if (copy_from_user(bo->base.vaddr,
 			     (void __user *)(uintptr_t)args->data,
-			     args->size);
-	if (ret != 0)
+			     args->size)) {
+		ret = -EFAULT;
 		goto fail;
+	}
 	/* Clear the rest of the memory left over from allocating out
 	 * of the BO cache.
 	 */
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 018145e..355ee4b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -49,22 +49,27 @@
 	/* Which HVS channel we're using for our CRTC. */
 	int channel;
 
-	/* Pointer to the actual hardware display list memory for the
-	 * crtc.
-	 */
-	u32 __iomem *dlist;
-
-	u32 dlist_size; /* in dwords */
-
 	struct drm_pending_vblank_event *event;
 };
 
+struct vc4_crtc_state {
+	struct drm_crtc_state base;
+	/* Dlist area for this CRTC configuration. */
+	struct drm_mm_node mm;
+};
+
 static inline struct vc4_crtc *
 to_vc4_crtc(struct drm_crtc *crtc)
 {
 	return (struct vc4_crtc *)crtc;
 }
 
+static inline struct vc4_crtc_state *
+to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
+{
+	return (struct vc4_crtc_state *)crtc_state;
+}
+
 struct vc4_crtc_data {
 	/* Which channel of the HVS this pixelvalve sources from. */
 	int hvs_channel;
@@ -83,7 +88,7 @@
 } crtc_regs[] = {
 	CRTC_REG(PV_CONTROL),
 	CRTC_REG(PV_V_CONTROL),
-	CRTC_REG(PV_VSYNCD),
+	CRTC_REG(PV_VSYNCD_EVEN),
 	CRTC_REG(PV_HORZA),
 	CRTC_REG(PV_HORZB),
 	CRTC_REG(PV_VERTA),
@@ -183,6 +188,8 @@
 
 static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	struct drm_crtc_state *state = crtc->state;
 	struct drm_display_mode *mode = &state->adjusted_mode;
@@ -212,6 +219,16 @@
 				 PV_HORZB_HFP) |
 		   VC4_SET_FIELD(mode->hdisplay, PV_HORZB_HACTIVE));
 
+	CRTC_WRITE(PV_VERTA,
+		   VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
+				 PV_VERTA_VBP) |
+		   VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+				 PV_VERTA_VSYNC));
+	CRTC_WRITE(PV_VERTB,
+		   VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+				 PV_VERTB_VFP) |
+		   VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
+
 	if (interlace) {
 		CRTC_WRITE(PV_VERTA_EVEN,
 			   VC4_SET_FIELD(mode->vtotal - mode->vsync_end - 1,
@@ -241,6 +258,10 @@
 		   PV_CONTROL_FIFO_CLR |
 		   PV_CONTROL_EN);
 
+	HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
+		  SCALER_DISPBKGND_AUTOHS |
+		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
+
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
 		vc4_crtc_dump_regs(vc4_crtc);
@@ -319,11 +340,13 @@
 static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 				 struct drm_crtc_state *state)
 {
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_plane *plane;
-	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	unsigned long flags;
 	u32 dlist_count = 0;
+	int ret;
 
 	/* The pixelvalve can only feed one encoder (and encoders are
 	 * 1:1 with connectors.)
@@ -346,18 +369,12 @@
 
 	dlist_count++; /* Account for SCALER_CTL0_END. */
 
-	if (!vc4_crtc->dlist || dlist_count > vc4_crtc->dlist_size) {
-		vc4_crtc->dlist = ((u32 __iomem *)vc4->hvs->dlist +
-				   HVS_BOOTLOADER_DLIST_END);
-		vc4_crtc->dlist_size = ((SCALER_DLIST_SIZE >> 2) -
-					HVS_BOOTLOADER_DLIST_END);
-
-		if (dlist_count > vc4_crtc->dlist_size) {
-			DRM_DEBUG_KMS("dlist too large for CRTC (%d > %d).\n",
-				      dlist_count, vc4_crtc->dlist_size);
-			return -EINVAL;
-		}
-	}
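+	/* Reserve dlist_count dwords in the HVS display list area;
+	 * dlist_mm units are dwords, alignment is a single dword.
+	 */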
+	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
+	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
+				 dlist_count, 1, 0);
+	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
+	if (ret)
+		return ret;
 
 	return 0;
 }
@@ -368,47 +385,29 @@
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
 	struct drm_plane *plane;
 	bool debug_dump_regs = false;
-	u32 __iomem *dlist_next = vc4_crtc->dlist;
+	u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
+	u32 __iomem *dlist_next = dlist_start;
 
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
 		vc4_hvs_dump_state(dev);
 	}
 
-	/* Copy all the active planes' dlist contents to the hardware dlist.
-	 *
-	 * XXX: If the new display list was large enough that it
-	 * overlapped a currently-read display list, we need to do
-	 * something like disable scanout before putting in the new
-	 * list.  For now, we're safe because we only have the two
-	 * planes.
-	 */
+	/* Copy all the active planes' dlist contents to the hardware dlist. */
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		dlist_next += vc4_plane_write_dlist(plane, dlist_next);
 	}
 
-	if (dlist_next == vc4_crtc->dlist) {
-		/* If no planes were enabled, use the SCALER_CTL0_END
-		 * at the start of the display list memory (in the
-		 * bootloader section).  We'll rewrite that
-		 * SCALER_CTL0_END, just in case, though.
-		 */
-		writel(SCALER_CTL0_END, vc4->hvs->dlist);
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 0);
-	} else {
-		writel(SCALER_CTL0_END, dlist_next);
-		dlist_next++;
+	writel(SCALER_CTL0_END, dlist_next);
+	dlist_next++;
 
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  (u32 __iomem *)vc4_crtc->dlist -
-			  (u32 __iomem *)vc4->hvs->dlist);
+	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-		/* Make the next display list start after ours. */
-		vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
-		vc4_crtc->dlist = dlist_next;
-	}
+	HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+		  vc4_state->mm.start);
 
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
@@ -544,6 +543,7 @@
 	/* Make sure all other async modesets have landed. */
 	ret = down_interruptible(&vc4->async_modeset);
 	if (ret) {
+		drm_framebuffer_unreference(fb);
 		kfree(flip_state);
 		return ret;
 	}
@@ -573,6 +573,36 @@
 		return drm_atomic_helper_page_flip(crtc, fb, event, flags);
 }
 
+static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct vc4_crtc_state *vc4_state;
+
+	vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
+	if (!vc4_state)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
+	return &vc4_state->base;
+}
+
+static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
+				   struct drm_crtc_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+
+	if (vc4_state->mm.allocated) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
+		drm_mm_remove_node(&vc4_state->mm);
+		spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
+	}
+
+	__drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
+
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = vc4_crtc_destroy,
@@ -581,8 +611,8 @@
 	.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
 	.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
 	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.atomic_duplicate_state = vc4_crtc_duplicate_state,
+	.atomic_destroy_state = vc4_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -593,26 +623,6 @@
 	.atomic_flush = vc4_crtc_atomic_flush,
 };
 
-/* Frees the page flip event when the DRM device is closed with the
- * event still outstanding.
- */
-void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
-	struct drm_device *dev = crtc->dev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
-
-	if (vc4_crtc->event && vc4_crtc->event->base.file_priv == file) {
-		vc4_crtc->event->base.destroy(&vc4_crtc->event->base);
-		drm_crtc_vblank_put(crtc);
-		vc4_crtc->event = NULL;
-	}
-
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 static const struct vc4_crtc_data pv0_data = {
 	.hvs_channel = 0,
 	.encoder0_type = VC4_ENCODER_TYPE_DSI0,
@@ -664,9 +674,9 @@
 	struct vc4_dev *vc4 = to_vc4_dev(drm);
 	struct vc4_crtc *vc4_crtc;
 	struct drm_crtc *crtc;
-	struct drm_plane *primary_plane, *cursor_plane;
+	struct drm_plane *primary_plane, *cursor_plane, *destroy_plane, *temp;
 	const struct of_device_id *match;
-	int ret;
+	int ret, i;
 
 	vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
 	if (!vc4_crtc)
@@ -695,27 +705,49 @@
 		goto err;
 	}
 
-	cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
-	if (IS_ERR(cursor_plane)) {
-		dev_err(dev, "failed to construct cursor plane\n");
-		ret = PTR_ERR(cursor_plane);
-		goto err_primary;
-	}
-
-	drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane,
+	drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
 				  &vc4_crtc_funcs, NULL);
 	drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
 	primary_plane->crtc = crtc;
-	cursor_plane->crtc = crtc;
 	vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
 	vc4_crtc->channel = vc4_crtc->data->hvs_channel;
 
+	/* Set up some arbitrary number of planes.  We're not limited
+	 * by a set number of physical registers, just the space in
+	 * the HVS (16k) and how small a plane can be (28 bytes).
+	 * However, each plane we set up takes up some memory, and
+	 * increases the cost of looping over planes, which atomic
+	 * modesetting does quite a bit.  As a result, we pick a
+	 * modest number of planes to expose, which should hopefully
+	 * still cover any sane use case.
+	 */
+	for (i = 0; i < 8; i++) {
+		struct drm_plane *plane =
+			vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
+
+		if (IS_ERR(plane))
+			continue;
+
+		plane->possible_crtcs = 1 << drm_crtc_index(crtc);
+	}
+
+	/* Set up the legacy cursor after overlay initialization,
+	 * since we overlay planes on the CRTC in the order they were
+	 * initialized.
+	 */
+	cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
+	if (!IS_ERR(cursor_plane)) {
+		cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
+		cursor_plane->crtc = crtc;
+		crtc->cursor = cursor_plane;
+	}
+
 	CRTC_WRITE(PV_INTEN, 0);
 	CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
 	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
 			       vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc);
 	if (ret)
-		goto err_cursor;
+		goto err_destroy_planes;
 
 	vc4_set_crtc_possible_masks(drm, crtc);
 
@@ -723,10 +755,12 @@
 
 	return 0;
 
-err_cursor:
-	cursor_plane->funcs->destroy(cursor_plane);
-err_primary:
-	primary_plane->funcs->destroy(primary_plane);
+err_destroy_planes:
+	list_for_each_entry_safe(destroy_plane, temp,
+				 &drm->mode_config.plane_list, head) {
+		if (destroy_plane->possible_crtcs == 1 << drm_crtc_index(crtc))
+			destroy_plane->funcs->destroy(destroy_plane);
+	}
 err:
 	return ret;
 }
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index f1655ff..b7d2ff0 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -43,14 +43,6 @@
 	return map;
 }
 
-static void vc4_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		vc4_cancel_page_flip(crtc, file);
-}
-
 static void vc4_lastclose(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -91,8 +83,6 @@
 			    DRIVER_HAVE_IRQ |
 			    DRIVER_PRIME),
 	.lastclose = vc4_lastclose,
-	.preclose = vc4_drm_preclose,
-
 	.irq_handler = vc4_irq,
 	.irq_preinstall = vc4_irq_preinstall,
 	.irq_postinstall = vc4_irq_postinstall,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 51a6333..fa2ad15 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -52,7 +52,7 @@
 	/* Protects bo_cache and the BO stats. */
 	struct mutex bo_lock;
 
-	/* Sequence number for the last job queued in job_list.
+	/* Sequence number for the last job queued in bin_job_list.
 	 * Starts at 0 (no jobs emitted).
 	 */
 	uint64_t emit_seqno;
@@ -62,11 +62,19 @@
 	 */
 	uint64_t finished_seqno;
 
-	/* List of all struct vc4_exec_info for jobs to be executed.
-	 * The first job in the list is the one currently programmed
-	 * into ct0ca/ct1ca for execution.
+	/* List of all struct vc4_exec_info for jobs to be executed in
+	 * the binner.  The first job in the list is the one currently
+	 * programmed into ct0ca for execution.
 	 */
-	struct list_head job_list;
+	struct list_head bin_job_list;
+
+	/* List of all struct vc4_exec_info for jobs that have
+	 * completed binning and are ready for rendering.  The first
+	 * job in the list is the one currently programmed into ct1ca
+	 * for execution.
+	 */
+	struct list_head render_job_list;
+
 	/* List of the finished vc4_exec_infos waiting to be freed by
 	 * job_done_work.
 	 */
@@ -154,7 +162,17 @@
 struct vc4_hvs {
 	struct platform_device *pdev;
 	void __iomem *regs;
-	void __iomem *dlist;
+	u32 __iomem *dlist;
+
+	/* Memory manager for CRTCs to allocate space in the display
+	 * list.  Units are dwords.
+	 */
+	struct drm_mm dlist_mm;
+	/* Memory manager for the LBM memory used by HVS scaling. */
+	struct drm_mm lbm_mm;
+	spinlock_t mm_lock;
+
+	struct drm_mm_node mitchell_netravali_filter;
 };
 
 struct vc4_plane {
@@ -286,11 +304,20 @@
 };
 
 static inline struct vc4_exec_info *
-vc4_first_job(struct vc4_dev *vc4)
+vc4_first_bin_job(struct vc4_dev *vc4)
 {
-	if (list_empty(&vc4->job_list))
+	if (list_empty(&vc4->bin_job_list))
 		return NULL;
-	return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
+	return list_first_entry(&vc4->bin_job_list, struct vc4_exec_info, head);
+}
+
+static inline struct vc4_exec_info *
+vc4_first_render_job(struct vc4_dev *vc4)
+{
+	if (list_empty(&vc4->render_job_list))
+		return NULL;
+	return list_first_entry(&vc4->render_job_list,
+				struct vc4_exec_info, head);
 }
 
 /**
@@ -386,7 +413,6 @@
 extern struct platform_driver vc4_crtc_driver;
 int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
 void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
-void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
 int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
 
 /* vc4_debugfs.c */
@@ -405,7 +431,9 @@
 			 struct drm_file *file_priv);
 int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
-void vc4_submit_next_job(struct drm_device *dev);
+void vc4_submit_next_bin_job(struct drm_device *dev);
+void vc4_submit_next_render_job(struct drm_device *dev);
+void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
 int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
 		       uint64_t timeout_ns, bool interruptible);
 void vc4_job_handle_completed(struct vc4_dev *vc4);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 202aa15..8d4384f 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -141,10 +141,10 @@
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_vc4_get_hang_state *state;
 	struct vc4_hang_state *kernel_state;
-	struct vc4_exec_info *exec;
+	struct vc4_exec_info *exec[2];
 	struct vc4_bo *bo;
 	unsigned long irqflags;
-	unsigned int i, unref_list_count;
+	unsigned int i, j, unref_list_count, prev_idx;
 
 	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
 	if (!kernel_state)
@@ -153,37 +153,55 @@
 	state = &kernel_state->user_state;
 
 	spin_lock_irqsave(&vc4->job_lock, irqflags);
-	exec = vc4_first_job(vc4);
-	if (!exec) {
+	exec[0] = vc4_first_bin_job(vc4);
+	exec[1] = vc4_first_render_job(vc4);
+	if (!exec[0] && !exec[1]) {
 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 		return;
 	}
 
-	unref_list_count = 0;
-	list_for_each_entry(bo, &exec->unref_list, unref_head)
-		unref_list_count++;
+	/* Get the bos from both binner and renderer into hang state. */
+	state->bo_count = 0;
+	for (i = 0; i < 2; i++) {
+		if (!exec[i])
+			continue;
 
-	state->bo_count = exec->bo_count + unref_list_count;
-	kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
-				   GFP_ATOMIC);
+		unref_list_count = 0;
+		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
+			unref_list_count++;
+		state->bo_count += exec[i]->bo_count + unref_list_count;
+	}
+
+	kernel_state->bo = kcalloc(state->bo_count,
+				   sizeof(*kernel_state->bo), GFP_ATOMIC);
+
 	if (!kernel_state->bo) {
 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 		return;
 	}
 
-	for (i = 0; i < exec->bo_count; i++) {
-		drm_gem_object_reference(&exec->bo[i]->base);
-		kernel_state->bo[i] = &exec->bo[i]->base;
+	prev_idx = 0;
+	for (i = 0; i < 2; i++) {
+		if (!exec[i])
+			continue;
+
+		for (j = 0; j < exec[i]->bo_count; j++) {
+			drm_gem_object_reference(&exec[i]->bo[j]->base);
+			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+		}
+
+		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
+			drm_gem_object_reference(&bo->base.base);
+			kernel_state->bo[j + prev_idx] = &bo->base.base;
+			j++;
+		}
+		prev_idx = j;
 	}
 
-	list_for_each_entry(bo, &exec->unref_list, unref_head) {
-		drm_gem_object_reference(&bo->base.base);
-		kernel_state->bo[i] = &bo->base.base;
-		i++;
-	}
-
-	state->start_bin = exec->ct0ca;
-	state->start_render = exec->ct1ca;
+	if (exec[0])
+		state->start_bin = exec[0]->ct0ca;
+	if (exec[1])
+		state->start_render = exec[1]->ct1ca;
 
 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 
@@ -267,13 +285,15 @@
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	uint32_t ct0ca, ct1ca;
 	unsigned long irqflags;
-	struct vc4_exec_info *exec;
+	struct vc4_exec_info *bin_exec, *render_exec;
 
 	spin_lock_irqsave(&vc4->job_lock, irqflags);
-	exec = vc4_first_job(vc4);
+
+	bin_exec = vc4_first_bin_job(vc4);
+	render_exec = vc4_first_render_job(vc4);
 
 	/* If idle, we can stop watching for hangs. */
-	if (!exec) {
+	if (!bin_exec && !render_exec) {
 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 		return;
 	}
@@ -284,9 +304,12 @@
 	/* If we've made any progress in execution, rearm the timer
 	 * and wait.
 	 */
-	if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
-		exec->last_ct0ca = ct0ca;
-		exec->last_ct1ca = ct1ca;
+	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
+	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
+		if (bin_exec)
+			bin_exec->last_ct0ca = ct0ca;
+		if (render_exec)
+			render_exec->last_ct1ca = ct1ca;
 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 		vc4_queue_hangcheck(dev);
 		return;
@@ -386,11 +409,13 @@
  * The job_lock should be held during this.
  */
 void
-vc4_submit_next_job(struct drm_device *dev)
+vc4_submit_next_bin_job(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_exec_info *exec = vc4_first_job(vc4);
+	struct vc4_exec_info *exec;
 
+again:
+	exec = vc4_first_bin_job(vc4);
 	if (!exec)
 		return;
 
@@ -400,11 +425,40 @@
 	V3D_WRITE(V3D_BPOA, 0);
 	V3D_WRITE(V3D_BPOS, 0);
 
-	if (exec->ct0ca != exec->ct0ea)
+	/* Either put the job in the binner if it uses the binner, or
+	 * immediately move it to the to-be-rendered queue.
+	 */
+	if (exec->ct0ca != exec->ct0ea) {
 		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+	} else {
+		vc4_move_job_to_render(dev, exec);
+		goto again;
+	}
+}
+
+void
+vc4_submit_next_render_job(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_exec_info *exec = vc4_first_render_job(vc4);
+
+	if (!exec)
+		return;
+
 	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
 
+void
+vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	bool was_empty = list_empty(&vc4->render_job_list);
+
+	list_move_tail(&exec->head, &vc4->render_job_list);
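+	/* An idle render queue needs an explicit kick here; otherwise
+	 * the frame-done interrupt handler submits the next job.
+	 */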
+	if (was_empty)
+		vc4_submit_next_render_job(dev);
+}
+
 static void
 vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
 {
@@ -443,14 +497,14 @@
 	exec->seqno = seqno;
 	vc4_update_bo_seqnos(exec, seqno);
 
-	list_add_tail(&exec->head, &vc4->job_list);
+	list_add_tail(&exec->head, &vc4->bin_job_list);
 
 	/* If no job was executing, kick ours off.  Otherwise, it'll
-	 * get started when the previous job's frame done interrupt
+	 * get started when the previous job's flush done interrupt
 	 * occurs.
 	 */
-	if (vc4_first_job(vc4) == exec) {
-		vc4_submit_next_job(dev);
+	if (vc4_first_bin_job(vc4) == exec) {
+		vc4_submit_next_bin_job(dev);
 		vc4_queue_hangcheck(dev);
 	}
 
@@ -859,7 +913,8 @@
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-	INIT_LIST_HEAD(&vc4->job_list);
+	INIT_LIST_HEAD(&vc4->bin_job_list);
+	INIT_LIST_HEAD(&vc4->render_job_list);
 	INIT_LIST_HEAD(&vc4->job_done_list);
 	INIT_LIST_HEAD(&vc4->seqno_cb_list);
 	spin_lock_init(&vc4->job_lock);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index c69c046..d8b8649 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -47,6 +47,7 @@
 	void __iomem *hdmicore_regs;
 	void __iomem *hd_regs;
 	int hpd_gpio;
+	bool hpd_active_low;
 
 	struct clk *pixel_clock;
 	struct clk *hsm_clock;
@@ -95,6 +96,7 @@
 	HDMI_REG(VC4_HDMI_SW_RESET_CONTROL),
 	HDMI_REG(VC4_HDMI_HOTPLUG_INT),
 	HDMI_REG(VC4_HDMI_HOTPLUG),
+	HDMI_REG(VC4_HDMI_RAM_PACKET_CONFIG),
 	HDMI_REG(VC4_HDMI_HORZA),
 	HDMI_REG(VC4_HDMI_HORZB),
 	HDMI_REG(VC4_HDMI_FIFO_CTL),
@@ -165,7 +167,8 @@
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
 	if (vc4->hdmi->hpd_gpio) {
-		if (gpio_get_value(vc4->hdmi->hpd_gpio))
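+		/* XOR folds the active-low polarity into the raw GPIO
+		 * level.
+		 */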
+		if (gpio_get_value_cansleep(vc4->hdmi->hpd_gpio) ^
+		    vc4->hdmi->hpd_active_low)
 			return connector_status_connected;
 		else
 			return connector_status_disconnected;
@@ -495,6 +498,16 @@
 		goto err_put_i2c;
 	}
 
+	/* This is the rate that is set by the firmware.  The number
+	 * needs to be a bit higher than the pixel clock rate
+	 * (generally 148.5MHz).
+	 */
+	ret = clk_set_rate(hdmi->hsm_clock, 163682864);
+	if (ret) {
+		DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
+		goto err_unprepare_pix;
+	}
+
 	ret = clk_prepare_enable(hdmi->hsm_clock);
 	if (ret) {
 		DRM_ERROR("Failed to turn on HDMI state machine clock: %d\n",
@@ -506,17 +519,40 @@
 	 * we'll use the HDMI core's register.
 	 */
 	if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
-		hdmi->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
+		enum of_gpio_flags hpd_gpio_flags;
+
+		hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
+							 "hpd-gpios", 0,
+							 &hpd_gpio_flags);
 		if (hdmi->hpd_gpio < 0) {
 			ret = hdmi->hpd_gpio;
 			goto err_unprepare_hsm;
 		}
+
+		hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
 	}
 
 	vc4->hdmi = hdmi;
 
 	/* HDMI core must be enabled. */
-	WARN_ON_ONCE((HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE) == 0);
+	if (!(HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE)) {
+		HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_SW_RST);
+		udelay(1);
+		HD_WRITE(VC4_HD_M_CTL, 0);
+
+		HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_ENABLE);
+
+		HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL,
+			   VC4_HDMI_SW_RESET_HDMI |
+			   VC4_HDMI_SW_RESET_FORMAT_DETECT);
+
+		HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL, 0);
+
+		/* PHY should be in reset, like
+		 * vc4_hdmi_encoder_disable() does.
+		 */
+		HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16);
+	}
 
 	drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
 			 DRM_MODE_ENCODER_TMDS, NULL);
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 8098c5b..6fbab1c 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -100,12 +100,76 @@
 }
 #endif
 
+/* The filter kernel is composed of dwords each containing 3 9-bit
+ * signed integers packed next to each other.
+ */
+#define VC4_INT_TO_COEFF(coeff) (coeff & 0x1ff)
+#define VC4_PPF_FILTER_WORD(c0, c1, c2)				\
+	((((c0) & 0x1ff) << 0) |				\
+	 (((c1) & 0x1ff) << 9) |				\
+	 (((c2) & 0x1ff) << 18))
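+/* e.g. VC4_PPF_FILTER_WORD(-2, 0, 2) packs the 9-bit two's-complement
+ * values 0x1fe, 0x000 and 0x002 into bits [8:0], [17:9] and [26:18].
+ */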
+
+/* The whole filter kernel is arranged as the coefficients 0-16 going
+ * up, then a pad, then 17-31 going down and reversed within the
+ * dwords.  This means that a linear phase kernel (where it's
+ * symmetrical at the boundary between 15 and 16) has the last 5
+ * dwords matching the first 5, but reversed.
+ */
+#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8,	\
+				c9, c10, c11, c12, c13, c14, c15)	\
+	{VC4_PPF_FILTER_WORD(c0, c1, c2),				\
+	 VC4_PPF_FILTER_WORD(c3, c4, c5),				\
+	 VC4_PPF_FILTER_WORD(c6, c7, c8),				\
+	 VC4_PPF_FILTER_WORD(c9, c10, c11),				\
+	 VC4_PPF_FILTER_WORD(c12, c13, c14),				\
+	 VC4_PPF_FILTER_WORD(c15, c15, 0)}
+
+#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6
+#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)
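+/* i.e. 11 dwords: the 6 ascending words plus the first 5 mirrored. */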
+
+/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali.
+ * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
+ */
+static const u32 mitchell_netravali_1_3_1_3_kernel[] =
+	VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18,
+				50, 82, 119, 155, 187, 213, 227);
+
+static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
+					struct drm_mm_node *space,
+					const u32 *kernel)
+{
+	int ret, i;
+	u32 __iomem *dst_kernel;
+
+	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
+				 0);
+	if (ret) {
+		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
+			  ret);
+		return ret;
+	}
+
+	dst_kernel = hvs->dlist + space->start;
+
+	for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
+		if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS) {
+			writel(kernel[i], &dst_kernel[i]);
+		} else {
+			writel(kernel[VC4_KERNEL_DWORDS - i - 1],
+			       &dst_kernel[i]);
+		}
+	}
+
+	return 0;
+}
+
 static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct vc4_dev *vc4 = drm->dev_private;
 	struct vc4_hvs *hvs = NULL;
+	int ret;
 
 	hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
 	if (!hvs)
@@ -119,6 +183,33 @@
 
 	hvs->dlist = hvs->regs + SCALER_DLIST_START;
 
+	spin_lock_init(&hvs->mm_lock);
+
+	/* Set up the HVS display list memory manager.  We never
+	 * overwrite the setup from the bootloader (just 128b out of
+	 * our 16K), since we don't want to scramble the screen when
+	 * transitioning from the firmware's boot setup to runtime.
+	 */
+	drm_mm_init(&hvs->dlist_mm,
+		    HVS_BOOTLOADER_DLIST_END,
+		    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
+
+	/* Set up the HVS LBM memory manager.  We could have some more
+	 * complicated data structure that allowed reuse of LBM areas
+	 * between planes when they don't overlap on the screen, but
+	 * for now we just allocate globally.
+	 */
+	drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+
+	/* Upload filter kernels.  We only have the one for now, so we
+	 * keep it around for the lifetime of the driver.
+	 */
+	ret = vc4_hvs_upload_linear_kernel(hvs,
+					   &hvs->mitchell_netravali_filter,
+					   mitchell_netravali_1_3_1_3_kernel);
+	if (ret)
+		return ret;
+
 	vc4->hvs = hvs;
 	return 0;
 }
@@ -129,6 +220,12 @@
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct vc4_dev *vc4 = drm->dev_private;
 
+	if (vc4->hvs->mitchell_netravali_filter.allocated)
+		drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
+
+	drm_mm_takedown(&vc4->hvs->dlist_mm);
+	drm_mm_takedown(&vc4->hvs->lbm_mm);
+
 	vc4->hvs = NULL;
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 78a2135..b0104a34 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -30,6 +30,10 @@
  * disables that specific interrupt, and 0s written are ignored
  * (reading either one returns the set of enabled interrupts).
  *
+ * When we take a binning flush done interrupt, we need to submit the
+ * next frame for binning and move the finished frame to the render
+ * thread.
+ *
  * When we take a render frame interrupt, we need to wake the
  * processes waiting for some frame to be done, and get the next frame
  * submitted ASAP (so the hardware doesn't sit idle when there's work
@@ -44,6 +48,7 @@
 #include "vc4_regs.h"
 
 #define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
+			 V3D_INT_FLDONE | \
 			 V3D_INT_FRDONE)
 
 DECLARE_WAIT_QUEUE_HEAD(render_wait);
@@ -77,7 +82,7 @@
 		unsigned long irqflags;
 
 		spin_lock_irqsave(&vc4->job_lock, irqflags);
-		current_exec = vc4_first_job(vc4);
+		current_exec = vc4_first_bin_job(vc4);
 		if (current_exec) {
 			vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
 			list_add_tail(&vc4->overflow_mem->unref_head,
@@ -98,17 +103,43 @@
 }
 
 static void
-vc4_irq_finish_job(struct drm_device *dev)
+vc4_irq_finish_bin_job(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_exec_info *exec = vc4_first_job(vc4);
+	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
+
+	if (!exec)
+		return;
+
+	vc4_move_job_to_render(dev, exec);
+	vc4_submit_next_bin_job(dev);
+}
+
+static void
+vc4_cancel_bin_job(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
+
+	if (!exec)
+		return;
+
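+	/* Requeue the interrupted bin job at the tail and restart
+	 * binning from the head of the queue.
+	 */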
+	list_move_tail(&exec->head, &vc4->bin_job_list);
+	vc4_submit_next_bin_job(dev);
+}
+
+static void
+vc4_irq_finish_render_job(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_exec_info *exec = vc4_first_render_job(vc4);
 
 	if (!exec)
 		return;
 
 	vc4->finished_seqno++;
 	list_move_tail(&exec->head, &vc4->job_done_list);
-	vc4_submit_next_job(dev);
+	vc4_submit_next_render_job(dev);
 
 	wake_up_all(&vc4->job_wait_queue);
 	schedule_work(&vc4->job_done_work);
@@ -125,9 +156,10 @@
 	barrier();
 	intctl = V3D_READ(V3D_INTCTL);
 
-	/* Acknowledge the interrupts we're handling here. The render
-	 * frame done interrupt will be cleared, while OUTOMEM will
-	 * stay high until the underlying cause is cleared.
+	/* Acknowledge the interrupts we're handling here. The binner
+	 * last flush / render frame done interrupt will be cleared,
+	 * while OUTOMEM will stay high until the underlying cause is
+	 * cleared.
 	 */
 	V3D_WRITE(V3D_INTCTL, intctl);
 
@@ -138,9 +170,16 @@
 		status = IRQ_HANDLED;
 	}
 
+	if (intctl & V3D_INT_FLDONE) {
+		spin_lock(&vc4->job_lock);
+		vc4_irq_finish_bin_job(dev);
+		spin_unlock(&vc4->job_lock);
+		status = IRQ_HANDLED;
+	}
+
 	if (intctl & V3D_INT_FRDONE) {
 		spin_lock(&vc4->job_lock);
-		vc4_irq_finish_job(dev);
+		vc4_irq_finish_render_job(dev);
 		spin_unlock(&vc4->job_lock);
 		status = IRQ_HANDLED;
 	}
@@ -205,6 +244,7 @@
 	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
 
 	spin_lock_irqsave(&vc4->job_lock, irqflags);
-	vc4_irq_finish_job(dev);
+	vc4_cancel_bin_job(dev);
+	vc4_irq_finish_render_job(dev);
 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 }
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index f95f2df..4718ae5 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -49,6 +49,15 @@
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
+	/* Make sure that drm_atomic_helper_wait_for_vblanks()
+	 * actually waits for vblank.  If we're doing a full atomic
+	 * modeset (as opposed to a vc4_update_plane() short circuit),
+	 * then we need to wait for scanout to be done with our
+	 * display lists before we free it and potentially reallocate
+	 * and overwrite the dlist memory with a new modeset.
+	 */
+	state->legacy_cursor_update = false;
+
 	drm_atomic_helper_wait_for_vblanks(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 0addbad..7b0c72a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -24,19 +24,52 @@
 #include "drm_fb_cma_helper.h"
 #include "drm_plane_helper.h"
 
+enum vc4_scaling_mode {
+	VC4_SCALING_NONE,
+	VC4_SCALING_TPZ,
+	VC4_SCALING_PPF,
+};
+
 struct vc4_plane_state {
 	struct drm_plane_state base;
+	/* System memory copy of the display list for this element, computed
+	 * at atomic_check time.
+	 */
 	u32 *dlist;
-	u32 dlist_size; /* Number of dwords in allocated for the display list */
+	u32 dlist_size; /* Number of dwords allocated for the display list */
 	u32 dlist_count; /* Number of used dwords in the display list. */
 
-	/* Offset in the dlist to pointer word 0. */
-	u32 pw0_offset;
+	/* Offset in the dlist to various words, for pageflip or
+	 * cursor updates.
+	 */
+	u32 pos0_offset;
+	u32 pos2_offset;
+	u32 ptr0_offset;
 
 	/* Offset where the plane's dlist was last stored in the
-	   hardware at vc4_crtc_atomic_flush() time.
-	*/
-	u32 *hw_dlist;
+	 * hardware at vc4_crtc_atomic_flush() time.
+	 */
+	u32 __iomem *hw_dlist;
+
+	/* Clipped coordinates of the plane on the display. */
+	int crtc_x, crtc_y, crtc_w, crtc_h;
+	/* Clipped area being scanned from in the FB. */
+	u32 src_x, src_y;
+
+	u32 src_w[2], src_h[2];
+
+	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
+	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
+	bool is_unity;
+	bool is_yuv;
+
+	/* Physical scanout start address for each color plane
+	 * (bo->paddr plus the FB offset).
+	 */
+	u32 offsets[3];
+
+	/* Our allocation in LBM for temporary storage during scaling. */
+	struct drm_mm_node lbm;
 };
 
 static inline struct vc4_plane_state *
@@ -50,6 +83,7 @@
 	u32 hvs; /* HVS_FORMAT_* */
 	u32 pixel_order;
 	bool has_alpha;
+	bool flip_cbcr;
 } hvs_formats[] = {
 	{
 		.drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
@@ -59,6 +93,48 @@
 		.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
 		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
 	},
+	{
+		.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
+		.pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
+	},
+	{
+		.drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
+		.pixel_order = HVS_PIXEL_ORDER_XBGR, .has_alpha = false,
+	},
+	{
+		.drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
+		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
+	},
+	{
+		.drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
+		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
+	},
+	{
+		.drm = DRM_FORMAT_YUV422,
+		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
+	},
+	{
+		.drm = DRM_FORMAT_YVU422,
+		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
+		.flip_cbcr = true,
+	},
+	{
+		.drm = DRM_FORMAT_YUV420,
+		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
+	},
+	{
+		.drm = DRM_FORMAT_YVU420,
+		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
+		.flip_cbcr = true,
+	},
+	{
+		.drm = DRM_FORMAT_NV12,
+		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
+	},
+	{
+		.drm = DRM_FORMAT_NV16,
+		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
+	},
 };
 
 static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
@@ -73,6 +149,16 @@
 	return NULL;
 }
 
+static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
+{
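+	/* PPF scaling is used when upscaling, TPZ when downscaling. */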
+	if (dst > src)
+		return VC4_SCALING_PPF;
+	else if (dst < src)
+		return VC4_SCALING_TPZ;
+	else
+		return VC4_SCALING_NONE;
+}
+
 static bool plane_enabled(struct drm_plane_state *state)
 {
 	return state->fb && state->crtc;
@@ -89,6 +175,8 @@
 	if (!vc4_state)
 		return NULL;
 
+	memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+
 	__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
 
 	if (vc4_state->dlist) {
@@ -108,8 +196,17 @@
 static void vc4_plane_destroy_state(struct drm_plane *plane,
 				    struct drm_plane_state *state)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 
+	if (vc4_state->lbm.allocated) {
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+		drm_mm_remove_node(&vc4_state->lbm);
+		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+	}
+
 	kfree(vc4_state->dlist);
 	__drm_atomic_helper_plane_destroy_state(plane, &vc4_state->base);
 	kfree(state);
@@ -148,84 +245,400 @@
 	vc4_state->dlist[vc4_state->dlist_count++] = val;
 }
 
+/* Returns the scl0/scl1 field based on whether the dimensions need to
+ * be up/down/non-scaled.
+ *
+ * This is a replication of a table from the spec.
+ */
+static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
+{
+	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+	switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
+	case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
+		return SCALER_CTL0_SCL_H_PPF_V_PPF;
+	case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
+		return SCALER_CTL0_SCL_H_TPZ_V_PPF;
+	case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
+		return SCALER_CTL0_SCL_H_PPF_V_TPZ;
+	case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
+		return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
+	case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
+		return SCALER_CTL0_SCL_H_PPF_V_NONE;
+	case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
+		return SCALER_CTL0_SCL_H_NONE_V_PPF;
+	case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
+		return SCALER_CTL0_SCL_H_NONE_V_TPZ;
+	case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
+		return SCALER_CTL0_SCL_H_TPZ_V_NONE;
+	default:
+	case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
+		/* The unity case is independently handled by
+		 * SCALER_CTL0_UNITY.
+		 */
+		return 0;
+	}
+}
+
+static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+{
+	struct drm_plane *plane = state->plane;
+	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+	u32 subpixel_src_mask = (1 << 16) - 1;
+	u32 format = fb->pixel_format;
+	int num_planes = drm_format_num_planes(format);
+	u32 h_subsample = 1;
+	u32 v_subsample = 1;
+	int i;
+
+	for (i = 0; i < num_planes; i++)
+		vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
+
+	/* We don't support subpixel source positioning for scaling. */
+	if ((state->src_x & subpixel_src_mask) ||
+	    (state->src_y & subpixel_src_mask) ||
+	    (state->src_w & subpixel_src_mask) ||
+	    (state->src_h & subpixel_src_mask)) {
+		return -EINVAL;
+	}
+
+	vc4_state->src_x = state->src_x >> 16;
+	vc4_state->src_y = state->src_y >> 16;
+	vc4_state->src_w[0] = state->src_w >> 16;
+	vc4_state->src_h[0] = state->src_h >> 16;
+
+	vc4_state->crtc_x = state->crtc_x;
+	vc4_state->crtc_y = state->crtc_y;
+	vc4_state->crtc_w = state->crtc_w;
+	vc4_state->crtc_h = state->crtc_h;
+
+	vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
+						       vc4_state->crtc_w);
+	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
+						       vc4_state->crtc_h);
+
+	if (num_planes > 1) {
+		vc4_state->is_yuv = true;
+
+		h_subsample = drm_format_horz_chroma_subsampling(format);
+		v_subsample = drm_format_vert_chroma_subsampling(format);
+		vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
+		vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;
+
+		vc4_state->x_scaling[1] =
+			vc4_get_scaling_mode(vc4_state->src_w[1],
+					     vc4_state->crtc_w);
+		vc4_state->y_scaling[1] =
+			vc4_get_scaling_mode(vc4_state->src_h[1],
+					     vc4_state->crtc_h);
+
+		/* YUV conversion requires that scaling be enabled,
+		 * even on a plane that's otherwise 1:1.  Choose TPZ
+		 * for simplicity.
+		 */
+		if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
+			vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
+		if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
+			vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+	}
+
+	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+			       vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+			       vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
+			       vc4_state->y_scaling[1] == VC4_SCALING_NONE);
+
+	/* No configuring scaling on the cursor plane, since it gets
+	 * non-vblank-synced updates, and scaling requires LBM changes
+	 * which have to be vblank-synced.
+	 */
+	if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
+		return -EINVAL;
+
+	/* Clamp the on-screen start x/y to 0.  The hardware doesn't
+	 * support negative y, and negative x wastes bandwidth.
+	 */
+	if (vc4_state->crtc_x < 0) {
+		for (i = 0; i < num_planes; i++) {
+			u32 cpp = drm_format_plane_cpp(fb->pixel_format, i);
+			u32 subs = ((i == 0) ? 1 : h_subsample);
+
+			vc4_state->offsets[i] += (cpp *
+						  (-vc4_state->crtc_x) / subs);
+		}
+		vc4_state->src_w[0] += vc4_state->crtc_x;
+		vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
+		vc4_state->crtc_x = 0;
+	}
+
+	if (vc4_state->crtc_y < 0) {
+		for (i = 0; i < num_planes; i++) {
+			u32 subs = ((i == 0) ? 1 : v_subsample);
+
+			vc4_state->offsets[i] += (fb->pitches[i] *
+						  (-vc4_state->crtc_y) / subs);
+		}
+		vc4_state->src_h[0] += vc4_state->crtc_y;
+		vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
+		vc4_state->crtc_y = 0;
+	}
+
+	return 0;
+}
+
+static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
+{
+	u32 scale, recip;
+
+	scale = (1 << 16) * src / dst;
+
+	/* The specs note that while the reciprocal would be defined
+	 * as (1<<32)/scale, ~0 is close enough.
+	 */
+	recip = ~0 / scale;
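+	/* e.g. a 2:1 downscale gives scale = 0x20000 and recip =
+	 * 0x7fff, one below the exact (1 << 32) / scale.
+	 */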
+
+	vc4_dlist_write(vc4_state,
+			VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
+			VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
+	vc4_dlist_write(vc4_state,
+			VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
+}
+
+static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
+{
+	u32 scale = (1 << 16) * src / dst;
+
+	vc4_dlist_write(vc4_state,
+			SCALER_PPF_AGC |
+			VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
+			VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
+}
+
+static u32 vc4_lbm_size(struct drm_plane_state *state)
+{
+	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+	/* This is the worst case number.  One of the two sizes will
+	 * be used depending on the scaling configuration.
+	 */
+	u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
+	u32 lbm;
+
+	if (!vc4_state->is_yuv) {
+		if (vc4_state->is_unity)
+			return 0;
+		else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
+			lbm = pix_per_line * 8;
+		else {
+			/* In special cases, this multiplier might be 12. */
+			lbm = pix_per_line * 16;
+		}
+	} else {
+		/* There are cases for this going down to a multiplier
+		 * of 2, but according to the firmware source, the
+		 * table in the docs is somewhat wrong.
+		 */
+		lbm = pix_per_line * 16;
+	}
+
+	lbm = roundup(lbm, 32);
+
+	return lbm;
+}
+
+static void vc4_write_scaling_parameters(struct drm_plane_state *state,
+					 int channel)
+{
+	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+	/* Ch0 H-PPF Word 0: Scaling Parameters */
+	if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
+		vc4_write_ppf(vc4_state,
+			      vc4_state->src_w[channel], vc4_state->crtc_w);
+	}
+
+	/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
+	if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
+		vc4_write_ppf(vc4_state,
+			      vc4_state->src_h[channel], vc4_state->crtc_h);
+		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+	}
+
+	/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
+	if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
+		vc4_write_tpz(vc4_state,
+			      vc4_state->src_w[channel], vc4_state->crtc_w);
+	}
+
+	/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
+	if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
+		vc4_write_tpz(vc4_state,
+			      vc4_state->src_h[channel], vc4_state->crtc_h);
+		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+	}
+}
+
 /* Writes out a full display list for an active plane to the plane's
  * private dlist state.
  */
 static int vc4_plane_mode_set(struct drm_plane *plane,
 			      struct drm_plane_state *state)
 {
+	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 	struct drm_framebuffer *fb = state->fb;
-	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
 	u32 ctl0_offset = vc4_state->dlist_count;
 	const struct hvs_format *format = vc4_get_hvs_format(fb->pixel_format);
-	uint32_t offset = fb->offsets[0];
-	int crtc_x = state->crtc_x;
-	int crtc_y = state->crtc_y;
-	int crtc_w = state->crtc_w;
-	int crtc_h = state->crtc_h;
+	int num_planes = drm_format_num_planes(format->drm);
+	u32 scl0, scl1;
+	u32 lbm_size;
+	unsigned long irqflags;
+	int ret, i;
 
-	if (state->crtc_w << 16 != state->src_w ||
-	    state->crtc_h << 16 != state->src_h) {
-		/* We don't support scaling yet, which involves
-		 * allocating the LBM memory for scaling temporary
-		 * storage, and putting filter kernels in the HVS
-		 * context.
-		 */
-		return -EINVAL;
+	ret = vc4_plane_setup_clipping_and_scaling(state);
+	if (ret)
+		return ret;
+
+	/* Allocate the LBM memory that the HVS will use for temporary
+	 * storage due to our scaling/format conversion.
+	 */
+	lbm_size = vc4_lbm_size(state);
+	if (lbm_size) {
+		if (!vc4_state->lbm.allocated) {
+			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+			ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
+						 &vc4_state->lbm,
+						 lbm_size, 32, 0);
+			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+		} else {
+			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
+		}
 	}
 
-	if (crtc_x < 0) {
-		offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
-		crtc_w += crtc_x;
-		crtc_x = 0;
+	if (ret)
+		return ret;
+
+	/* SCL1 is used for Cb/Cr scaling of planar formats.  For RGB
+	 * and 4:4:4, scl1 should be set to scl0 so both channels of
+	 * the scaler do the same thing.  For YUV, the Y plane needs
+	 * to be put in channel 1 and Cb/Cr in channel 0, so we swap
+	 * the scl fields here.
+	 */
+	if (num_planes == 1) {
+		scl0 = vc4_get_scl_field(state, 1);
+		scl1 = scl0;
+	} else {
+		scl0 = vc4_get_scl_field(state, 1);
+		scl1 = vc4_get_scl_field(state, 0);
 	}
 
-	if (crtc_y < 0) {
-		offset += fb->pitches[0] * -crtc_y;
-		crtc_h += crtc_y;
-		crtc_y = 0;
-	}
-
+	/* Control word */
 	vc4_dlist_write(vc4_state,
 			SCALER_CTL0_VALID |
 			(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
 			(format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
-			SCALER_CTL0_UNITY);
+			(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
+			VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
+			VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
 
 	/* Position Word 0: Image Positions and Alpha Value */
+	vc4_state->pos0_offset = vc4_state->dlist_count;
 	vc4_dlist_write(vc4_state,
 			VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
-			VC4_SET_FIELD(crtc_x, SCALER_POS0_START_X) |
-			VC4_SET_FIELD(crtc_y, SCALER_POS0_START_Y));
+			VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
+			VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
 
-	/* Position Word 1: Scaled Image Dimensions.
-	 * Skipped due to SCALER_CTL0_UNITY scaling.
-	 */
+	/* Position Word 1: Scaled Image Dimensions. */
+	if (!vc4_state->is_unity) {
+		vc4_dlist_write(vc4_state,
+				VC4_SET_FIELD(vc4_state->crtc_w,
+					      SCALER_POS1_SCL_WIDTH) |
+				VC4_SET_FIELD(vc4_state->crtc_h,
+					      SCALER_POS1_SCL_HEIGHT));
+	}
 
 	/* Position Word 2: Source Image Size, Alpha Mode */
+	vc4_state->pos2_offset = vc4_state->dlist_count;
 	vc4_dlist_write(vc4_state,
 			VC4_SET_FIELD(format->has_alpha ?
 				      SCALER_POS2_ALPHA_MODE_PIPELINE :
 				      SCALER_POS2_ALPHA_MODE_FIXED,
 				      SCALER_POS2_ALPHA_MODE) |
-			VC4_SET_FIELD(crtc_w, SCALER_POS2_WIDTH) |
-			VC4_SET_FIELD(crtc_h, SCALER_POS2_HEIGHT));
+			VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
+			VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
 
 	/* Position Word 3: Context.  Written by the HVS. */
 	vc4_dlist_write(vc4_state, 0xc0c0c0c0);
 
-	vc4_state->pw0_offset = vc4_state->dlist_count;
 
-	/* Pointer Word 0: RGB / Y Pointer */
-	vc4_dlist_write(vc4_state, bo->paddr + offset);
+	/* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
+	 *
+	 * The pointers may be any byte address.
+	 */
+	vc4_state->ptr0_offset = vc4_state->dlist_count;
+	if (!format->flip_cbcr) {
+		for (i = 0; i < num_planes; i++)
+			vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
+	} else {
+		WARN_ON_ONCE(num_planes != 3);
+		vc4_dlist_write(vc4_state, vc4_state->offsets[0]);
+		vc4_dlist_write(vc4_state, vc4_state->offsets[2]);
+		vc4_dlist_write(vc4_state, vc4_state->offsets[1]);
+	}
 
-	/* Pointer Context Word 0: Written by the HVS */
-	vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+	/* Pointer Context Word 0/1/2: Written by the HVS */
+	for (i = 0; i < num_planes; i++)
+		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
 
-	/* Pitch word 0: Pointer 0 Pitch */
-	vc4_dlist_write(vc4_state,
-			VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH));
+	/* Pitch word 0/1/2 */
+	for (i = 0; i < num_planes; i++) {
+		vc4_dlist_write(vc4_state,
+				VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+	}
+
+	/* Colorspace conversion words */
+	if (vc4_state->is_yuv) {
+		vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
+		vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
+		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
+	}
+
+	if (!vc4_state->is_unity) {
+		/* LBM Base Address. */
+		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+			vc4_dlist_write(vc4_state, vc4_state->lbm.start);
+		}
+
+		if (num_planes > 1) {
+			/* Emit Cb/Cr as channel 0 and Y as channel
+			 * 1. This matches how we set up scl0/scl1
+			 * above.
+			 */
+			vc4_write_scaling_parameters(state, 1);
+		}
+		vc4_write_scaling_parameters(state, 0);
+
+		/* If any PPF setup was done, then all the kernel
+		 * pointers get uploaded.
+		 */
+		if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
+		    vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
+		    vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
+		    vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
+			u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
+						   SCALER_PPF_KERNEL_OFFSET);
+
+			/* HPPF plane 0 */
+			vc4_dlist_write(vc4_state, kernel);
+			/* VPPF plane 0 */
+			vc4_dlist_write(vc4_state, kernel);
+			/* HPPF plane 1 */
+			vc4_dlist_write(vc4_state, kernel);
+			/* VPPF plane 1 */
+			vc4_dlist_write(vc4_state, kernel);
+		}
+	}
 
 	vc4_state->dlist[ctl0_offset] |=
 		VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
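The LBM node inserted above lives as long as the plane state, so the matching teardown presumably sits in the plane-state destroy path (sketch; that path is outside this hunk):

	if (vc4_state->lbm.allocated) {
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		drm_mm_remove_node(&vc4_state->lbm);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
	}
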
@@ -303,13 +716,13 @@
 	 * scanout will start from this address as soon as the FIFO
 	 * needs to refill with pixels.
 	 */
-	writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
+	writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
 
 	/* Also update the CPU-side dlist copy, so that any later
 	 * atomic updates that don't do a new modeset on our plane
 	 * also use our updated address.
 	 */
-	vc4_state->dlist[vc4_state->pw0_offset] = addr;
+	vc4_state->dlist[vc4_state->ptr0_offset] = addr;
 }
 
 static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
@@ -325,8 +738,83 @@
 	drm_plane_cleanup(plane);
 }
 
+/* Implements immediate (non-vblank-synced) updates of the cursor
+ * position, or falls back to the atomic helper otherwise.
+ */
+static int
+vc4_update_plane(struct drm_plane *plane,
+		 struct drm_crtc *crtc,
+		 struct drm_framebuffer *fb,
+		 int crtc_x, int crtc_y,
+		 unsigned int crtc_w, unsigned int crtc_h,
+		 uint32_t src_x, uint32_t src_y,
+		 uint32_t src_w, uint32_t src_h)
+{
+	struct drm_plane_state *plane_state;
+	struct vc4_plane_state *vc4_state;
+
+	if (plane != crtc->cursor)
+		goto out;
+
+	plane_state = plane->state;
+	vc4_state = to_vc4_plane_state(plane_state);
+
+	if (!plane_state)
+		goto out;
+
+	/* If we're changing the cursor contents, do that in the
+	 * normal vblank-synced atomic path.
+	 */
+	if (fb != plane_state->fb)
+		goto out;
+
+	/* No configuring new scaling in the fast path. */
+	if (crtc_w != plane_state->crtc_w ||
+	    crtc_h != plane_state->crtc_h ||
+	    src_w != plane_state->src_w ||
+	    src_h != plane_state->src_h) {
+		goto out;
+	}
+
+	/* Set the cursor's position on the screen.  This is the
+	 * expected change from the drm_mode_cursor_universal()
+	 * helper.
+	 */
+	plane_state->crtc_x = crtc_x;
+	plane_state->crtc_y = crtc_y;
+
+	/* Allow changing the start position within the cursor BO, if
+	 * that matters.
+	 */
+	plane_state->src_x = src_x;
+	plane_state->src_y = src_y;
+
+	/* Update the display list based on the new crtc_x/y. */
+	vc4_plane_atomic_check(plane, plane_state);
+
+	/* Note that we can't just call vc4_plane_write_dlist()
+	 * because that would smash the context data that the HVS is
+	 * currently using.
+	 */
+	writel(vc4_state->dlist[vc4_state->pos0_offset],
+	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
+	writel(vc4_state->dlist[vc4_state->pos2_offset],
+	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
+	writel(vc4_state->dlist[vc4_state->ptr0_offset],
+	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+
+	return 0;
+
+out:
+	return drm_atomic_helper_update_plane(plane, crtc, fb,
+					      crtc_x, crtc_y,
+					      crtc_w, crtc_h,
+					      src_x, src_y,
+					      src_w, src_h);
+}
+
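The three writel() calls above only work because vc4_plane_mode_set() recorded where those words landed in the dlist; schematically (editorial illustration, not code from the patch):

	/* dlist[ctl0_offset]    CTL0, untouched by the fast path
	 * dlist[pos0_offset]    POS0: crtc_x/crtc_y        <- rewritten
	 * dlist[pos2_offset]    POS2: source width/height  <- rewritten
	 * dlist[ptr0_offset]    PTR0: scanout address      <- rewritten
	 * other words           HVS-owned context, must not be smashed
	 */
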
 static const struct drm_plane_funcs vc4_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
+	.update_plane = vc4_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = vc4_plane_destroy,
 	.set_property = NULL,
@@ -341,6 +829,7 @@
 	struct drm_plane *plane = NULL;
 	struct vc4_plane *vc4_plane;
 	u32 formats[ARRAY_SIZE(hvs_formats)];
+	u32 num_formats = 0;
 	int ret = 0;
 	unsigned i;
 
@@ -351,12 +840,20 @@
 		goto fail;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
-		formats[i] = hvs_formats[i].drm;
+	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
+		/* Don't allow YUV in cursor planes, since that means
+		 * turning on the scaler, which we don't allow for the
+		 * cursor.
+		 */
+		if (type != DRM_PLANE_TYPE_CURSOR ||
+		    hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
+			formats[num_formats++] = hvs_formats[i].drm;
+		}
+	}
 	plane = &vc4_plane->base;
 	ret = drm_universal_plane_init(dev, plane, 0xff,
 				       &vc4_plane_funcs,
-				       formats, ARRAY_SIZE(formats),
+				       formats, num_formats,
 				       type, NULL);
 
 	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 4e52a0a..bf42a8e 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -187,7 +187,7 @@
 # define PV_VCONTROL_CONTINUOUS			BIT(1)
 # define PV_VCONTROL_VIDEN			BIT(0)
 
-#define PV_VSYNCD				0x08
+#define PV_VSYNCD_EVEN				0x08
 
 #define PV_HORZA				0x0c
 # define PV_HORZA_HBP_MASK			VC4_MASK(31, 16)
@@ -350,6 +350,17 @@
 # define SCALER_DISPCTRLX_HEIGHT_SHIFT		0
 
 #define SCALER_DISPBKGND0                       0x00000044
+# define SCALER_DISPBKGND_AUTOHS		BIT(31)
+# define SCALER_DISPBKGND_INTERLACE		BIT(30)
+# define SCALER_DISPBKGND_GAMMA			BIT(29)
+# define SCALER_DISPBKGND_TESTMODE_MASK		VC4_MASK(28, 25)
+# define SCALER_DISPBKGND_TESTMODE_SHIFT	25
+/* Enables filling the scaler line with the RGB value in the low 24
+ * bits before compositing.  Costs cycles, so should be skipped if
+ * opaque display planes will cover everything.
+ */
+# define SCALER_DISPBKGND_FILL			BIT(24)
+
 #define SCALER_DISPSTAT0                        0x00000048
 #define SCALER_DISPBASE0                        0x0000004c
 # define SCALER_DISPSTATX_MODE_MASK		VC4_MASK(31, 30)
@@ -362,6 +373,9 @@
 # define SCALER_DISPSTATX_EMPTY			BIT(28)
 #define SCALER_DISPCTRL1                        0x00000050
 #define SCALER_DISPBKGND1                       0x00000054
+#define SCALER_DISPBKGNDX(x)			(SCALER_DISPBKGND0 +        \
+						 (x) * (SCALER_DISPBKGND1 - \
+							SCALER_DISPBKGND0))
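A worked expansion of the stride idiom (editorial arithmetic):

	/* e.g. the third HVS channel:
	 *   SCALER_DISPBKGNDX(2) = 0x44 + 2 * (0x54 - 0x44) = 0x64
	 * SCALER_DISPSTATX() below follows the same stride idiom.
	 */
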
 #define SCALER_DISPSTAT1                        0x00000058
 #define SCALER_DISPSTATX(x)			(SCALER_DISPSTAT0 +        \
 						 (x) * (SCALER_DISPSTAT1 - \
@@ -456,6 +470,8 @@
 #define VC4_HDMI_TX_PHY_RESET_CTL		0x2c0
 
 #define VC4_HD_M_CTL				0x00c
+# define VC4_HD_M_REGISTER_FILE_STANDBY		(3 << 6)
+# define VC4_HD_M_RAM_STANDBY			(3 << 4)
 # define VC4_HD_M_SW_RST			BIT(2)
 # define VC4_HD_M_ENABLE			BIT(0)
 
@@ -503,7 +519,12 @@
 	HVS_PIXEL_FORMAT_RGB888 = 5,
 	HVS_PIXEL_FORMAT_RGBA6666 = 6,
 	/* 32bpp */
-	HVS_PIXEL_FORMAT_RGBA8888 = 7
+	HVS_PIXEL_FORMAT_RGBA8888 = 7,
+
+	HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE = 8,
+	HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE = 9,
+	HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE = 10,
+	HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE = 11,
 };
 
 /* Note: the LSB is the rightmost character shown.  Only valid for
@@ -536,6 +557,21 @@
 #define SCALER_CTL0_ORDER_MASK			VC4_MASK(14, 13)
 #define SCALER_CTL0_ORDER_SHIFT			13
 
+#define SCALER_CTL0_SCL1_MASK			VC4_MASK(10, 8)
+#define SCALER_CTL0_SCL1_SHIFT			8
+
+#define SCALER_CTL0_SCL0_MASK			VC4_MASK(7, 5)
+#define SCALER_CTL0_SCL0_SHIFT			5
+
+#define SCALER_CTL0_SCL_H_PPF_V_PPF		0
+#define SCALER_CTL0_SCL_H_TPZ_V_PPF		1
+#define SCALER_CTL0_SCL_H_PPF_V_TPZ		2
+#define SCALER_CTL0_SCL_H_TPZ_V_TPZ		3
+#define SCALER_CTL0_SCL_H_PPF_V_NONE		4
+#define SCALER_CTL0_SCL_H_NONE_V_PPF		5
+#define SCALER_CTL0_SCL_H_NONE_V_TPZ		6
+#define SCALER_CTL0_SCL_H_TPZ_V_NONE		7
+
 /* Set to indicate no scaling. */
 #define SCALER_CTL0_UNITY			BIT(4)
 
@@ -551,6 +587,12 @@
 #define SCALER_POS0_START_X_MASK		VC4_MASK(11, 0)
 #define SCALER_POS0_START_X_SHIFT		0
 
+#define SCALER_POS1_SCL_HEIGHT_MASK		VC4_MASK(27, 16)
+#define SCALER_POS1_SCL_HEIGHT_SHIFT		16
+
+#define SCALER_POS1_SCL_WIDTH_MASK		VC4_MASK(11, 0)
+#define SCALER_POS1_SCL_WIDTH_SHIFT		0
+
 #define SCALER_POS2_ALPHA_MODE_MASK		VC4_MASK(31, 30)
 #define SCALER_POS2_ALPHA_MODE_SHIFT		30
 #define SCALER_POS2_ALPHA_MODE_PIPELINE		0
@@ -564,6 +606,80 @@
 #define SCALER_POS2_WIDTH_MASK			VC4_MASK(11, 0)
 #define SCALER_POS2_WIDTH_SHIFT			0
 
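These MASK/SHIFT pairs are consumed through field-packing helpers used in the dlist setup earlier in this diff; a sketch of their assumed shape (the real macros live elsewhere in the driver):

	#define VC4_MASK(high, low)		GENMASK(high, low)
	#define VC4_SET_FIELD(value, field)	\
		(((value) << field##_SHIFT) & field##_MASK)
	#define VC4_GET_FIELD(word, field)	\
		(((word) & field##_MASK) >> field##_SHIFT)
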
+/* Color Space Conversion words.  Some values are S2.8 signed
+ * integers, except that the 2 integer bits map as {0x0: 0, 0x1: 1,
+ * 0x2: 2, 0x3: -1}
+ */
+/* bottom 8 bits of S2.8 contribution of Cr to Blue */
+#define SCALER_CSC0_COEF_CR_BLU_MASK		VC4_MASK(31, 24)
+#define SCALER_CSC0_COEF_CR_BLU_SHIFT		24
+/* Signed offset to apply to Y before CSC. (Y' = Y + YY_OFS) */
+#define SCALER_CSC0_COEF_YY_OFS_MASK		VC4_MASK(23, 16)
+#define SCALER_CSC0_COEF_YY_OFS_SHIFT		16
+/* Signed offset to apply to CB before CSC (Cb' = Cb - 128 + CB_OFS). */
+#define SCALER_CSC0_COEF_CB_OFS_MASK		VC4_MASK(15, 8)
+#define SCALER_CSC0_COEF_CB_OFS_SHIFT		8
+/* Signed offset to apply to CB before CSC (Cr' = Cr - 128 + CR_OFS). */
+#define SCALER_CSC0_COEF_CR_OFS_MASK		VC4_MASK(7, 0)
+#define SCALER_CSC0_COEF_CR_OFS_SHIFT		0
+#define SCALER_CSC0_ITR_R_601_5			0x00f00000
+#define SCALER_CSC0_ITR_R_709_3			0x00f00000
+#define SCALER_CSC0_JPEG_JFIF			0x00000000
+
+/* S2.8 contribution of Cb to Green */
+#define SCALER_CSC1_COEF_CB_GRN_MASK		VC4_MASK(31, 22)
+#define SCALER_CSC1_COEF_CB_GRN_SHIFT		22
+/* S2.8 contribution of Cr to Green */
+#define SCALER_CSC1_COEF_CR_GRN_MASK		VC4_MASK(21, 12)
+#define SCALER_CSC1_COEF_CR_GRN_SHIFT		12
+/* S2.8 contribution of Y to all of RGB */
+#define SCALER_CSC1_COEF_YY_ALL_MASK		VC4_MASK(11, 2)
+#define SCALER_CSC1_COEF_YY_ALL_SHIFT		2
+/* top 2 bits of S2.8 contribution of Cr to Blue */
+#define SCALER_CSC1_COEF_CR_BLU_MASK		VC4_MASK(1, 0)
+#define SCALER_CSC1_COEF_CR_BLU_SHIFT		0
+#define SCALER_CSC1_ITR_R_601_5			0xe73304a8
+#define SCALER_CSC1_ITR_R_709_3			0xf2b784a8
+#define SCALER_CSC1_JPEG_JFIF			0xea34a400
+
+/* S2.8 contribution of Cb to Red */
+#define SCALER_CSC2_COEF_CB_RED_MASK		VC4_MASK(29, 20)
+#define SCALER_CSC2_COEF_CB_RED_SHIFT		20
+/* S2.8 contribution of Cr to Red */
+#define SCALER_CSC2_COEF_CR_RED_MASK		VC4_MASK(19, 10)
+#define SCALER_CSC2_COEF_CR_RED_SHIFT		10
+/* S2.8 contribution of Cb to Blue */
+#define SCALER_CSC2_COEF_CB_BLU_MASK		VC4_MASK(19, 10)
+#define SCALER_CSC2_COEF_CB_BLU_SHIFT		10
+#define SCALER_CSC2_ITR_R_601_5			0x00066204
+#define SCALER_CSC2_ITR_R_709_3			0x00072a1c
+#define SCALER_CSC2_JPEG_JFIF			0x000599c5
+
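A worked decode of one of these constants, using the S2.8 mapping documented above (editorial arithmetic):

	/* SCALER_CSC1_ITR_R_601_5 = 0xe73304a8:
	 *   YY_ALL (bits 11:2)  = 0x12a -> int 0x1, frac 0x2a
	 *                       ->  1 + 42/256  ~=  1.164 (255/219 Y gain)
	 *   CR_GRN (bits 21:12) = 0x330 -> int 0x3 (-1), frac 0x30
	 *                       -> -1 + 48/256  ~= -0.813
	 *   CB_GRN (bits 31:22) = 0x39c -> int 0x3 (-1), frac 0x9c
	 *                       -> -1 + 156/256 ~= -0.391
	 * i.e. the standard BT.601 studio-range YCbCr->RGB green terms.
	 */
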
+#define SCALER_TPZ0_VERT_RECALC			BIT(31)
+#define SCALER_TPZ0_SCALE_MASK			VC4_MASK(28, 8)
+#define SCALER_TPZ0_SCALE_SHIFT			8
+#define SCALER_TPZ0_IPHASE_MASK			VC4_MASK(7, 0)
+#define SCALER_TPZ0_IPHASE_SHIFT		0
+#define SCALER_TPZ1_RECIP_MASK			VC4_MASK(15, 0)
+#define SCALER_TPZ1_RECIP_SHIFT			0
+
+/* Skips interpolating coefficients to 64 phases, so just 8 are used.
+ * Required for nearest neighbor.
+ */
+#define SCALER_PPF_NOINTERP			BIT(31)
+/* Replaces the highest valued coefficient with one that makes all 4
+ * sum to unity.
+ */
+#define SCALER_PPF_AGC				BIT(30)
+#define SCALER_PPF_SCALE_MASK			VC4_MASK(24, 8)
+#define SCALER_PPF_SCALE_SHIFT			8
+#define SCALER_PPF_IPHASE_MASK			VC4_MASK(6, 0)
+#define SCALER_PPF_IPHASE_SHIFT			0
+
+#define SCALER_PPF_KERNEL_OFFSET_MASK		VC4_MASK(13, 0)
+#define SCALER_PPF_KERNEL_OFFSET_SHIFT		0
+#define SCALER_PPF_KERNEL_UNCACHED		BIT(31)
+
 #define SCALER_SRC_PITCH_MASK			VC4_MASK(15, 0)
 #define SCALER_SRC_PITCH_SHIFT			0
 
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 31de5d1..e6d3c60 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -268,6 +268,7 @@
 }
 
 static const struct of_device_id vc4_v3d_dt_match[] = {
+	{ .compatible = "brcm,bcm2835-v3d" },
 	{ .compatible = "brcm,vc4-v3d" },
 	{}
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a165f03..4854dac 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -237,13 +237,6 @@
 	return 0;
 }
 
-static bool virtio_gpu_crtc_mode_fixup(struct drm_crtc *crtc,
-				       const struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -277,18 +270,10 @@
 static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
 	.enable        = virtio_gpu_crtc_enable,
 	.disable       = virtio_gpu_crtc_disable,
-	.mode_fixup    = virtio_gpu_crtc_mode_fixup,
 	.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
 	.atomic_check  = virtio_gpu_crtc_atomic_check,
 };
 
-static bool virtio_gpu_enc_mode_fixup(struct drm_encoder *encoder,
-				      const struct drm_display_mode *mode,
-				      struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
 				    struct drm_display_mode *mode,
 				    struct drm_display_mode *adjusted_mode)
@@ -362,7 +347,6 @@
 }
 
 static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
-	.mode_fixup = virtio_gpu_enc_mode_fixup,
 	.mode_set   = virtio_gpu_enc_mode_set,
 	.enable     = virtio_gpu_enc_enable,
 	.disable    = virtio_gpu_enc_disable,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index b40ed60..7f898cf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -118,7 +118,7 @@
 
 
 static struct drm_driver driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
 	.set_busid = drm_virtio_set_busid,
 	.load = virtio_gpu_driver_load,
 	.unload = virtio_gpu_driver_unload,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 572fb35..70b44a2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -68,10 +68,17 @@
 	struct virtio_gpu_object *bo;
 	uint32_t handle;
 
-	if (plane->fb) {
-		vgfb = to_virtio_gpu_framebuffer(plane->fb);
+	if (plane->state->fb) {
+		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
 		bo = gem_to_virtio_gpu_obj(vgfb->obj);
 		handle = bo->hw_res_handle;
+		if (bo->dumb) {
+			virtio_gpu_cmd_transfer_to_host_2d
+				(vgdev, handle, 0,
+				 cpu_to_le32(plane->state->crtc_w),
+				 cpu_to_le32(plane->state->crtc_h),
+				 plane->state->crtc_x, plane->state->crtc_y, NULL);
+		}
 	} else {
 		handle = 0;
 	}
@@ -84,6 +91,11 @@
 				   plane->state->crtc_h,
 				   plane->state->crtc_x,
 				   plane->state->crtc_y);
+	virtio_gpu_cmd_resource_flush(vgdev, handle,
+				      plane->state->crtc_x,
+				      plane->state->crtc_y,
+				      plane->state->crtc_w,
+				      plane->state->crtc_h);
 }
 
 
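Together with the set_scanout call in the surrounding context, dumb-BO updates now follow the usual virtio-gpu three-step; a hedged sketch of the ordering (argument lists abbreviated, set_scanout's exact signature assumed):

	/* 1. Copy guest pixels into the host resource (dumb BOs only;
	 *    3D-rendered resources are already host-resident):
	 */
	virtio_gpu_cmd_transfer_to_host_2d(vgdev, handle, 0, w, h, x, y, NULL);
	/* 2. Point the scanout at the resource: */
	virtio_gpu_cmd_set_scanout(vgdev, scanout_id, handle, w, h, x, y);
	/* 3. Tell the host which rectangle changed: */
	virtio_gpu_cmd_resource_flush(vgdev, handle, x, y, w, h);
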
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 24fb348..6cbb7d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -195,7 +195,7 @@
 		      DRM_MASTER | DRM_AUTH),
 	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
 		      vmw_kms_update_layout_ioctl,
-		      DRM_MASTER),
+		      DRM_MASTER | DRM_CONTROL_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
 		      vmw_shader_define_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
@@ -972,15 +972,6 @@
 	return 0;
 }
 
-static void vmw_preclose(struct drm_device *dev,
-			 struct drm_file *file_priv)
-{
-	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-	struct vmw_private *dev_priv = vmw_priv(dev);
-
-	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
-}
-
 static void vmw_postclose(struct drm_device *dev,
 			 struct drm_file *file_priv)
 {
@@ -1011,7 +1002,6 @@
 	if (unlikely(vmw_fp == NULL))
 		return ret;
 
-	INIT_LIST_HEAD(&vmw_fp->fence_events);
 	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
@@ -1214,6 +1204,7 @@
 	}
 
 	dev_priv->active_master = vmaster;
+	drm_sysfs_hotplug_event(dev);
 
 	return 0;
 }
@@ -1501,7 +1492,6 @@
 	.master_set = vmw_master_set,
 	.master_drop = vmw_master_drop,
 	.open = vmw_driver_open,
-	.preclose = vmw_preclose,
 	.postclose = vmw_postclose,
 	.set_busid = drm_pci_set_busid,
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 469cdd5..019a6ca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20150810"
+#define VMWGFX_DRIVER_DATE "20160210"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_MINOR 10
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -80,7 +80,6 @@
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
-	struct list_head fence_events;
 	bool gb_aware;
 };
 
@@ -408,8 +407,11 @@
 	void *fb_info;
 	enum vmw_display_unit_type active_display_unit;
 	struct vmw_legacy_display *ldu_priv;
-	struct vmw_screen_object_display *sou_priv;
 	struct vmw_overlay *overlay_priv;
+	struct drm_property *hotplug_mode_update_property;
+	struct drm_property *implicit_placement_property;
+	unsigned num_implicit;
+	struct vmw_framebuffer *implicit_fb;
 
 	/*
 	 * Context and surface management.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5da5de0..723ba16 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3009,6 +3009,26 @@
 	return ret;
 }
 
+/**
+ * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXGenMips body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_view_id_val_add(sw_context, vmw_view_sr,
+				   cmd->body.shaderResourceViewId);
+}
+
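The anonymous-struct container_of() above is the standard idiom in this file for getting a typed view of a command: the header is the struct's first member, so the expression is equivalent to a cast that also exposes the body following the header in the command stream (sketch):

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXGenMips body;
	} *cmd = (void *)header;	/* header sits at offset 0 */
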
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -3297,7 +3317,7 @@
 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
 		    true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
 		    &vmw_cmd_dx_check_subresource, true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 8e689b4..e959df6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -71,7 +71,6 @@
  */
 struct vmw_event_fence_action {
 	struct vmw_fence_action action;
-	struct list_head fpriv_head;
 
 	struct drm_pending_event *event;
 	struct vmw_fence_obj *fence;
@@ -808,44 +807,6 @@
 }
 
 /**
- * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
- *
- * @fman: Pointer to a struct vmw_fence_manager
- * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
- * with pointers to a struct drm_file object about to be closed.
- *
- * This function removes all pending fence events with references to a
- * specific struct drm_file object about to be closed. The caller is required
- * to pass a list of all struct vmw_event_fence_action objects with such
- * events attached. This function is typically called before the
- * struct drm_file object's event management is taken down.
- */
-void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
-				struct list_head *event_list)
-{
-	struct vmw_event_fence_action *eaction;
-	struct drm_pending_event *event;
-	unsigned long irq_flags;
-
-	while (1) {
-		spin_lock_irqsave(&fman->lock, irq_flags);
-		if (list_empty(event_list))
-			goto out_unlock;
-		eaction = list_first_entry(event_list,
-					   struct vmw_event_fence_action,
-					   fpriv_head);
-		list_del_init(&eaction->fpriv_head);
-		event = eaction->event;
-		eaction->event = NULL;
-		spin_unlock_irqrestore(&fman->lock, irq_flags);
-		event->destroy(event);
-	}
-out_unlock:
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
-}
-
-
-/**
  * vmw_event_fence_action_seq_passed
  *
  * @action: The struct vmw_fence_action embedded in a struct
@@ -879,10 +840,8 @@
 		*eaction->tv_usec = tv.tv_usec;
 	}
 
-	list_del_init(&eaction->fpriv_head);
-	list_add_tail(&eaction->event->link, &file_priv->event_list);
+	drm_send_event_locked(dev, eaction->event);
 	eaction->event = NULL;
-	wake_up_all(&file_priv->event_wait);
 	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
 }
 
@@ -899,12 +858,6 @@
 {
 	struct vmw_event_fence_action *eaction =
 		container_of(action, struct vmw_event_fence_action, action);
-	struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&fman->lock, irq_flags);
-	list_del(&eaction->fpriv_head);
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
 
 	vmw_fence_obj_unreference(&eaction->fence);
 	kfree(eaction);
@@ -984,8 +937,6 @@
 {
 	struct vmw_event_fence_action *eaction;
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
-	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-	unsigned long irq_flags;
 
 	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
 	if (unlikely(eaction == NULL))
@@ -1002,10 +953,6 @@
 	eaction->tv_sec = tv_sec;
 	eaction->tv_usec = tv_usec;
 
-	spin_lock_irqsave(&fman->lock, irq_flags);
-	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
-
 	vmw_fence_obj_add_action(fence, &eaction->action);
 
 	return 0;
@@ -1025,38 +972,26 @@
 	struct vmw_event_fence_pending *event;
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	struct drm_device *dev = fman->dev_priv->dev;
-	unsigned long irq_flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->event_lock, irq_flags);
-
-	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
-	if (likely(ret == 0))
-		file_priv->event_space -= sizeof(event->event);
-
-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to allocate event space for this file.\n");
-		goto out_no_space;
-	}
-
-
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
 	if (unlikely(event == NULL)) {
 		DRM_ERROR("Failed to allocate an event.\n");
 		ret = -ENOMEM;
-		goto out_no_event;
+		goto out_no_space;
 	}
 
 	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
 	event->event.base.length = sizeof(*event);
 	event->event.user_data = user_data;
 
-	event->base.event = &event->event.base;
-	event->base.file_priv = file_priv;
-	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
 
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate event space for this file.\n");
+		kfree(event);
+		goto out_no_space;
+	}
 
 	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
 		ret = vmw_event_fence_action_queue(file_priv, fence,
@@ -1076,11 +1011,7 @@
 	return 0;
 
 out_no_queue:
-	event->base.destroy(&event->base);
-out_no_event:
-	spin_lock_irqsave(&dev->event_lock, irq_flags);
-	file_priv->event_space += sizeof(*event);
-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+	drm_event_cancel_free(dev, &event->base);
 out_no_space:
 	return ret;
 }
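This is the new core drm_event flow the vmwgfx code is being moved onto; the general pattern (hedged sketch using the 4.6-era helper signatures):

	/* Allocate and account the event against the file's event space: */
	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
	if (ret) {
		kfree(e);
		return ret;
	}

	/* Error path before the event is armed: */
	drm_event_cancel_free(dev, &e->base);

	/* Delivery once the fence signals (dev->event_lock held): */
	drm_send_event_locked(dev, &e->base);
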
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 8be6c29..83ae301 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -116,8 +116,6 @@
 				     struct drm_file *file_priv);
 extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 				 struct drm_file *file_priv);
-extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
-				       struct list_head *event_list);
 extern int vmw_event_fence_action_queue(struct drm_file *file_priv,
 					struct vmw_fence_obj *fence,
 					struct drm_pending_event *event,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b221a8c..4742ec4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -236,8 +236,8 @@
 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
 
-	du->cursor_x = x + crtc->x;
-	du->cursor_y = y + crtc->y;
+	du->cursor_x = x + du->set_gui_x;
+	du->cursor_y = y + du->set_gui_y;
 
 	/*
 	 * FIXME: Unclear whether there's any global state touched by the
@@ -663,9 +663,8 @@
 		break;
 	case vmw_du_screen_object:
 		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
-						  clips, num_clips, increment,
-						  true,
-						  NULL);
+						  clips, NULL, num_clips,
+						  increment, true, NULL);
 		break;
 	case vmw_du_legacy:
 		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
@@ -1109,6 +1108,22 @@
 	return 0;
 }
 
+static void
+vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
+{
+	if (dev_priv->hotplug_mode_update_property)
+		return;
+
+	dev_priv->hotplug_mode_update_property =
+		drm_property_create_range(dev_priv->dev,
+					  DRM_MODE_PROP_IMMUTABLE,
+					  "hotplug_mode_update", 0, 1);
+}
+
 int vmw_kms_init(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
@@ -1121,6 +1136,9 @@
 	dev->mode_config.max_width = dev_priv->texture_max_width;
 	dev->mode_config.max_height = dev_priv->texture_max_height;
 
+	drm_mode_create_suggested_offset_properties(dev);
+	vmw_kms_create_hotplug_mode_update_property(dev_priv);
+
 	ret = vmw_kms_stdu_init_display(dev_priv);
 	if (ret) {
 		ret = vmw_kms_sou_init_display(dev_priv);
@@ -1360,15 +1378,28 @@
 			du->pref_active = true;
 			du->gui_x = rects[du->unit].x;
 			du->gui_y = rects[du->unit].y;
+			drm_object_property_set_value
+			  (&con->base, dev->mode_config.suggested_x_property,
+			   du->gui_x);
+			drm_object_property_set_value
+			  (&con->base, dev->mode_config.suggested_y_property,
+			   du->gui_y);
 		} else {
 			du->pref_width = 800;
 			du->pref_height = 600;
 			du->pref_active = false;
+			drm_object_property_set_value
+			  (&con->base, dev->mode_config.suggested_x_property,
+			   0);
+			drm_object_property_set_value
+			  (&con->base, dev->mode_config.suggested_y_property,
+			   0);
 		}
 		con->status = vmw_du_connector_detect(con, true);
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);
+	drm_sysfs_hotplug_event(dev);
 
 	return 0;
 }
@@ -1591,6 +1622,12 @@
 				  struct drm_property *property,
 				  uint64_t val)
 {
+	struct vmw_display_unit *du = vmw_connector_to_du(connector);
+	struct vmw_private *dev_priv = vmw_priv(connector->dev);
+
+	if (property == dev_priv->implicit_placement_property)
+		du->is_implicit = val;
+
 	return 0;
 }
 
@@ -2096,3 +2133,119 @@
 
 	return 0;
 }
+
+/**
+ * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @du: The display unit of the crtc.
+ */
+void vmw_kms_del_active(struct vmw_private *dev_priv,
+			struct vmw_display_unit *du)
+{
+	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+	if (du->active_implicit) {
+		if (--(dev_priv->num_implicit) == 0)
+			dev_priv->implicit_fb = NULL;
+		du->active_implicit = false;
+	}
+}
+
+/**
+ * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @du: The display unit of the crtc.
+ * @vfb: The implicit framebuffer
+ *
+ * Registers a binding to an implicit framebuffer.
+ */
+void vmw_kms_add_active(struct vmw_private *dev_priv,
+			struct vmw_display_unit *du,
+			struct vmw_framebuffer *vfb)
+{
+	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
+
+	if (!du->active_implicit && du->is_implicit) {
+		dev_priv->implicit_fb = vfb;
+		du->active_implicit = true;
+		dev_priv->num_implicit++;
+	}
+}
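These helpers centralize what the screen-object code below used to open-code; the pairing in a crtc set_config path looks like this (sketch mirroring vmw_sou_crtc_set_config later in this diff):

	/* turning the crtc off: */
	vmw_kms_del_active(dev_priv, &sou->base);

	/* turning it on with framebuffer vfb: */
	vmw_kms_add_active(dev_priv, &sou->base, vfb);
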
+
+/**
+ * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
+ *
+ * @dev_priv: Pointer to device-private struct.
+ * @crtc: The crtc we want to flip.
+ *
+ * Returns true or false depending on whether it's OK to flip this crtc
+ * based on the criterion that we must not have more than one implicit
+ * frame-buffer at any one time.
+ */
+bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
+			    struct drm_crtc *crtc)
+{
+	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+	if (!du->is_implicit)
+		return true;
+
+	if (dev_priv->num_implicit != 1)
+		return false;
+
+	return true;
+}
+
+/**
+ * vmw_kms_update_implicit_fb - Update the implicit fb.
+ *
+ * @dev_priv: Pointer to device-private struct.
+ * @crtc: The crtc the new implicit frame-buffer is bound to.
+ */
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
+				struct drm_crtc *crtc)
+{
+	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+	struct vmw_framebuffer *vfb;
+
+	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+	if (!du->is_implicit)
+		return;
+
+	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
+		     dev_priv->implicit_fb != vfb);
+
+	dev_priv->implicit_fb = vfb;
+}
+
+/**
+ * vmw_kms_create_implicit_placement_property - Set up the implicit placement
+ * property.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @immutable: Whether the property is immutable.
+ *
+ * Sets up the implicit placement property unless it's already set up.
+ */
+void
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
+					   bool immutable)
+{
+	if (dev_priv->implicit_placement_property)
+		return;
+
+	dev_priv->implicit_placement_property =
+		drm_property_create_range(dev_priv->dev,
+					  immutable ?
+					  DRM_MODE_PROP_IMMUTABLE : 0,
+					  "implicit_placement", 0, 1);
+
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index edd8150..5720321 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -178,6 +178,9 @@
 	int gui_x;
 	int gui_y;
 	bool is_implicit;
+	bool active_implicit;
+	int set_gui_x;
+	int set_gui_y;
 };
 
 #define vmw_crtc_to_du(x) \
@@ -254,6 +257,18 @@
 			    struct drm_crtc **p_crtc,
 			    struct drm_display_mode **p_mode);
 void vmw_guess_mode_timing(struct drm_display_mode *mode);
+void vmw_kms_del_active(struct vmw_private *dev_priv,
+			struct vmw_display_unit *du);
+void vmw_kms_add_active(struct vmw_private *dev_priv,
+			struct vmw_display_unit *du,
+			struct vmw_framebuffer *vfb);
+bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
+			    struct drm_crtc *crtc);
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
+				struct drm_crtc *crtc);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
+						bool immutable);
+
 
 /*
  * Legacy display unit functions - vmwgfx_ldu.c
@@ -287,6 +302,7 @@
 int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
 				struct vmw_framebuffer *framebuffer,
 				struct drm_clip_rect *clips,
+				struct drm_vmw_rect *vclips,
 				unsigned num_clips, int increment,
 				bool interruptible,
 				struct vmw_fence_obj **out_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b6fa44f..63ccd98 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -288,6 +288,8 @@
 	crtc->y = set->y;
 	crtc->mode = *mode;
 	crtc->enabled = true;
+	ldu->base.set_gui_x = set->x;
+	ldu->base.set_gui_y = set->y;
 
 	vmw_ldu_add_active(dev_priv, ldu, vfb);
 
@@ -375,8 +377,19 @@
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	drm_object_attach_property(&connector->base,
-				      dev->mode_config.dirty_info_property,
-				      1);
+				   dev->mode_config.dirty_info_property,
+				   1);
+	drm_object_attach_property(&connector->base,
+				   dev_priv->hotplug_mode_update_property, 1);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, 0);
+	if (dev_priv->implicit_placement_property)
+		drm_object_attach_property
+			(&connector->base,
+			 dev_priv->implicit_placement_property,
+			 1);
 
 	return 0;
 }
@@ -412,6 +425,8 @@
 	if (ret != 0)
 		goto err_vblank_cleanup;
 
+	vmw_kms_create_implicit_placement_property(dev_priv, true);
+
 	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
 		for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
 			vmw_ldu_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index c5a1a08..0ea22fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -74,19 +74,6 @@
 	SVGA3dCmdBlitSurfaceToScreen body;
 };
 
-
-/*
- * Other structs.
- */
-
-struct vmw_screen_object_display {
-	unsigned num_implicit;
-
-	struct vmw_framebuffer *implicit_fb;
-	SVGAFifoCmdDefineGMRFB cur;
-	struct vmw_dma_buffer *pinned_gmrfb;
-};
-
 /**
  * Display unit using screen objects.
  */
@@ -97,7 +84,6 @@
 	struct vmw_dma_buffer *buffer; /**< Backing store buffer */
 
 	bool defined;
-	bool active_implicit;
 };
 
 static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
@@ -116,33 +102,6 @@
 	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
 }
 
-static void vmw_sou_del_active(struct vmw_private *vmw_priv,
-			       struct vmw_screen_object_unit *sou)
-{
-	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-
-	if (sou->active_implicit) {
-		if (--(ld->num_implicit) == 0)
-			ld->implicit_fb = NULL;
-		sou->active_implicit = false;
-	}
-}
-
-static void vmw_sou_add_active(struct vmw_private *vmw_priv,
-			       struct vmw_screen_object_unit *sou,
-			       struct vmw_framebuffer *vfb)
-{
-	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-
-	BUG_ON(!ld->num_implicit && ld->implicit_fb);
-
-	if (!sou->active_implicit && sou->base.is_implicit) {
-		ld->implicit_fb = vfb;
-		sou->active_implicit = true;
-		ld->num_implicit++;
-	}
-}
-
 /**
  * Send the fifo command to create a screen.
  */
@@ -185,6 +144,8 @@
 		cmd->obj.root.x = sou->base.gui_x;
 		cmd->obj.root.y = sou->base.gui_y;
 	}
+	sou->base.set_gui_x = cmd->obj.root.x;
+	sou->base.set_gui_y = cmd->obj.root.y;
 
 	/* Ok to assume that buffer is pinned in vram */
 	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
@@ -323,13 +284,13 @@
 		return -EINVAL;
 	}
 
-	/* sou only supports one fb active at the time */
+	/* Only one active implicit frame-buffer at a time. */
 	if (sou->base.is_implicit &&
-	    dev_priv->sou_priv->implicit_fb && vfb &&
-	    !(dev_priv->sou_priv->num_implicit == 1 &&
-	      sou->active_implicit) &&
-	    dev_priv->sou_priv->implicit_fb != vfb) {
-		DRM_ERROR("Multiple framebuffers not supported\n");
+	    dev_priv->implicit_fb && vfb &&
+	    !(dev_priv->num_implicit == 1 &&
+	      sou->base.active_implicit) &&
+	    dev_priv->implicit_fb != vfb) {
+		DRM_ERROR("Multiple implicit framebuffers not supported.\n");
 		return -EINVAL;
 	}
 
@@ -351,7 +312,7 @@
 		crtc->y = 0;
 		crtc->enabled = false;
 
-		vmw_sou_del_active(dev_priv, sou);
+		vmw_kms_del_active(dev_priv, &sou->base);
 
 		vmw_sou_backing_free(dev_priv, sou);
 
@@ -415,7 +376,7 @@
 		return ret;
 	}
 
-	vmw_sou_add_active(dev_priv, sou, vfb);
+	vmw_kms_add_active(dev_priv, &sou->base, vfb);
 
 	connector->encoder = encoder;
 	encoder->crtc = crtc;
@@ -428,39 +389,6 @@
 	return 0;
 }
 
-/**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
- */
-static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
-					    struct drm_crtc *crtc)
-{
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
-
-	if (!sou->base.is_implicit)
-		return true;
-
-	if (dev_priv->sou_priv->num_implicit != 1)
-		return false;
-
-	return true;
-}
-
-/**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
- */
-static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
-				       struct drm_crtc *crtc)
-{
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
-
-	BUG_ON(!sou->base.is_implicit);
-
-	dev_priv->sou_priv->implicit_fb =
-		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
-}
-
 static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
 				  struct drm_framebuffer *fb,
 				  struct drm_pending_vblank_event *event,
@@ -470,30 +398,27 @@
 	struct drm_framebuffer *old_fb = crtc->primary->fb;
 	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
 	struct vmw_fence_obj *fence = NULL;
-	struct drm_clip_rect clips;
+	struct drm_vmw_rect vclips;
 	int ret;
 
-	/* require ScreenObject support for page flipping */
-	if (!dev_priv->sou_priv)
-		return -ENOSYS;
-
-	if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
+	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
 		return -EINVAL;
 
 	crtc->primary->fb = fb;
 
 	/* do a full screen dirty update */
-	clips.x1 = clips.y1 = 0;
-	clips.x2 = fb->width;
-	clips.y2 = fb->height;
+	vclips.x = crtc->x;
+	vclips.y = crtc->y;
+	vclips.w = crtc->mode.hdisplay;
+	vclips.h = crtc->mode.vdisplay;
 
 	if (vfb->dmabuf)
 		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
-						  &clips, 1, 1,
+						  NULL, &vclips, 1, 1,
 						  true, &fence);
 	else
 		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
-						   &clips, NULL, NULL,
+						   NULL, &vclips, NULL,
 						   0, 0, 1, 1, &fence);
 
 
@@ -521,7 +446,7 @@
 	vmw_fence_obj_unreference(&fence);
 
 	if (vmw_crtc_to_du(crtc)->is_implicit)
-		vmw_sou_update_implicit_fb(dev_priv, crtc);
+		vmw_kms_update_implicit_fb(dev_priv, crtc);
 
 	return ret;
 
@@ -586,13 +511,12 @@
 	encoder = &sou->base.encoder;
 	connector = &sou->base.connector;
 
-	sou->active_implicit = false;
-
+	sou->base.active_implicit = false;
 	sou->base.pref_active = (unit == 0);
 	sou->base.pref_width = dev_priv->initial_width;
 	sou->base.pref_height = dev_priv->initial_height;
 	sou->base.pref_mode = NULL;
-	sou->base.is_implicit = true;
+	sou->base.is_implicit = false;
 
 	drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
 			   DRM_MODE_CONNECTOR_VIRTUAL);
@@ -611,8 +535,19 @@
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	drm_object_attach_property(&connector->base,
-				      dev->mode_config.dirty_info_property,
-				      1);
+				   dev->mode_config.dirty_info_property,
+				   1);
+	drm_object_attach_property(&connector->base,
+				   dev_priv->hotplug_mode_update_property, 1);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, 0);
+	if (dev_priv->implicit_placement_property)
+		drm_object_attach_property
+			(&connector->base,
+			 dev_priv->implicit_placement_property,
+			 sou->base.is_implicit);
 
 	return 0;
 }
@@ -622,11 +557,6 @@
 	struct drm_device *dev = dev_priv->dev;
 	int i, ret;
 
-	if (dev_priv->sou_priv) {
-		DRM_INFO("sou system already on\n");
-		return -EINVAL;
-	}
-
 	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
 		DRM_INFO("Not using screen objects,"
 			 " missing cap SCREEN_OBJECT_2\n");
@@ -634,21 +564,19 @@
 	}
 
 	ret = -ENOMEM;
-	dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
-	if (unlikely(!dev_priv->sou_priv))
-		goto err_no_mem;
-
-	dev_priv->sou_priv->num_implicit = 0;
-	dev_priv->sou_priv->implicit_fb = NULL;
+	dev_priv->num_implicit = 0;
+	dev_priv->implicit_fb = NULL;
 
 	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
 	if (unlikely(ret != 0))
-		goto err_free;
+		return ret;
 
 	ret = drm_mode_create_dirty_info_property(dev);
 	if (unlikely(ret != 0))
 		goto err_vblank_cleanup;
 
+	vmw_kms_create_implicit_placement_property(dev_priv, false);
+
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
 		vmw_sou_init(dev_priv, i);
 
@@ -660,10 +588,6 @@
 
 err_vblank_cleanup:
 	drm_vblank_cleanup(dev);
-err_free:
-	kfree(dev_priv->sou_priv);
-	dev_priv->sou_priv = NULL;
-err_no_mem:
 	return ret;
 }
 
@@ -671,13 +595,8 @@
 {
 	struct drm_device *dev = dev_priv->dev;
 
-	if (!dev_priv->sou_priv)
-		return -ENOSYS;
-
 	drm_vblank_cleanup(dev);
 
-	kfree(dev_priv->sou_priv);
-
 	return 0;
 }
 
@@ -738,6 +657,11 @@
 	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
 	int i;
 
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
 	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
 	cmd->header.size = sizeof(cmd->body) + region_size;
 
@@ -875,6 +799,11 @@
  */
 static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
 {
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
 	vmw_fifo_commit(dirty->dev_priv,
 			sizeof(struct vmw_kms_sou_dmabuf_blit) *
 			dirty->num_hits);
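The early commit of zero bytes matters because the dirty helper reserves fifo space before walking the clips; a hedged sketch of the pairing (the reserve side is not part of this hunk):

	/* in vmw_kms_helper_dirty(), before the clip loop (assumed): */
	cmd = vmw_fifo_reserve(dev_priv, dirty->fifo_reserve_size);
	/* ... clip callbacks fill commands and bump dirty->num_hits ... */
	dirty->fifo_commit(dirty);	/* must run even with zero hits, so
					 * the reservation is released */
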
@@ -909,6 +838,8 @@
  * @dev_priv: Pointer to the device private structure.
  * @framebuffer: Pointer to the dma-buffer backed framebuffer.
  * @clips: Array of clip rects.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
  * @num_clips: Number of clip rects in @clips.
  * @increment: Increment to use when looping over @clips.
  * @interruptible: Whether to perform waits interruptible if possible.
@@ -922,6 +853,7 @@
 int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
 				struct vmw_framebuffer *framebuffer,
 				struct drm_clip_rect *clips,
+				struct drm_vmw_rect *vclips,
 				unsigned num_clips, int increment,
 				bool interruptible,
 				struct vmw_fence_obj **out_fence)
@@ -945,7 +877,7 @@
 	dirty.clip = vmw_sou_dmabuf_clip;
 	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
 		num_clips;
-	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   0, 0, num_clips, increment, &dirty);
 	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
 
@@ -967,6 +899,11 @@
  */
 static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
 {
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
 	vmw_fifo_commit(dirty->dev_priv,
 			sizeof(struct vmw_kms_sou_readback_blit) *
 			dirty->num_hits);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 4ef5ffd..b949102 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -96,7 +96,6 @@
  *               content_vfbs dimensions, then this is a pointer into the
  *               corresponding field in content_vfbs.  If not, then this
  *               is a separate buffer to which content_vfbs will blit.
- * @content_fb: holds the rendered content, can be a surface or DMA buffer
  * @content_type:  content_fb type
  * @defined:  true if the current display unit has been initialized
  */
@@ -104,8 +103,6 @@
 	struct vmw_display_unit base;
 
 	struct vmw_surface     *display_srf;
-	struct drm_framebuffer *content_fb;
-
 	enum stdu_content_type content_fb_type;
 
 	bool defined;
@@ -122,22 +119,6 @@
  *****************************************************************************/
 
 /**
- * vmw_stdu_pin_display - pins the resource associated with the display surface
- *
- * @stdu: contains the display surface
- *
- * Since the display surface can either be a private surface allocated by us,
- * or it can point to the content surface, we use this function to not pin the
- * same resource twice.
- */
-static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
-{
-	return vmw_resource_pin(&stdu->display_srf->res, false);
-}
-
-
-
-/**
  * vmw_stdu_unpin_display - unpins the resource associated with display surface
  *
  * @stdu: contains the display surface
@@ -153,13 +134,7 @@
 		struct vmw_resource *res = &stdu->display_srf->res;
 
 		vmw_resource_unpin(res);
-
-		if (stdu->content_fb_type != SAME_AS_DISPLAY) {
-			vmw_resource_unreference(&res);
-			stdu->content_fb_type = SAME_AS_DISPLAY;
-		}
-
-		stdu->display_srf = NULL;
+		vmw_surface_unreference(&stdu->display_srf);
 	}
 }
 
@@ -185,6 +160,9 @@
  *
  * @dev_priv:  VMW DRM device
  * @stdu: display unit to create a Screen Target for
+ * @mode: The mode to set.
+ * @crtc_x: X coordinate of screen target relative to framebuffer origin.
+ * @crtc_y: Y coordinate of screen target relative to framebuffer origin.
  *
  * Creates an STDU that we can use later.  This function is called whenever the
  * framebuffer size changes.
@@ -193,7 +171,9 @@
  * 0 on success, error code on failure
  */
 static int vmw_stdu_define_st(struct vmw_private *dev_priv,
-			      struct vmw_screen_target_display_unit *stdu)
+			      struct vmw_screen_target_display_unit *stdu,
+			      struct drm_display_mode *mode,
+			      int crtc_x, int crtc_y)
 {
 	struct {
 		SVGA3dCmdHeader header;
@@ -211,17 +191,19 @@
 	cmd->header.size = sizeof(cmd->body);
 
 	cmd->body.stid   = stdu->base.unit;
-	cmd->body.width  = stdu->display_srf->base_size.width;
-	cmd->body.height = stdu->display_srf->base_size.height;
+	cmd->body.width  = mode->hdisplay;
+	cmd->body.height = mode->vdisplay;
 	cmd->body.flags  = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
 	cmd->body.dpi    = 0;
-	cmd->body.xRoot  = stdu->base.crtc.x;
-	cmd->body.yRoot  = stdu->base.crtc.y;
-
-	if (!stdu->base.is_implicit) {
+	if (stdu->base.is_implicit) {
+		cmd->body.xRoot  = crtc_x;
+		cmd->body.yRoot  = crtc_y;
+	} else {
 		cmd->body.xRoot  = stdu->base.gui_x;
 		cmd->body.yRoot  = stdu->base.gui_y;
 	}
+	stdu->base.set_gui_x = cmd->body.xRoot;
+	stdu->base.set_gui_y = cmd->body.yRoot;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
@@ -392,126 +374,43 @@
 	return ret;
 }
 
-
-
 /**
- * vmw_stdu_crtc_set_config - Sets a mode
+ * vmw_stdu_bind_fb - Bind an fb to a defined screen target
  *
- * @set:  mode parameters
- *
- * This function is the device-specific portion of the DRM CRTC mode set.
- * For the SVGA device, we do this by defining a Screen Target, binding a
- * GB Surface to that target, and finally update the screen target.
+ * @dev_priv: Pointer to a device private struct.
+ * @crtc: The crtc holding the screen target.
+ * @mode: The mode currently used by the screen target. Must be non-NULL.
+ * @new_fb: The new framebuffer to bind. Must be non-NULL.
  *
  * RETURNS:
- * 0 on success, error code otherwise
+ * 0 on success, error code on failure.
  */
-static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
+			    struct drm_crtc *crtc,
+			    struct drm_display_mode *mode,
+			    struct drm_framebuffer *new_fb)
 {
-	struct vmw_private *dev_priv;
-	struct vmw_screen_target_display_unit *stdu;
-	struct vmw_framebuffer *vfb;
+	struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
+	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
+	struct vmw_surface *new_display_srf = NULL;
+	enum stdu_content_type new_content_type;
 	struct vmw_framebuffer_surface *new_vfbs;
-	struct drm_display_mode *mode;
-	struct drm_framebuffer  *new_fb;
-	struct drm_crtc      *crtc;
-	struct drm_encoder   *encoder;
-	struct drm_connector *connector;
-	int    ret;
+	int ret;
 
+	WARN_ON_ONCE(!stdu->defined);
 
-	if (!set || !set->crtc)
-		return -EINVAL;
+	if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
+	    new_fb->height == mode->vdisplay)
+		new_content_type = SAME_AS_DISPLAY;
+	else if (vfb->dmabuf)
+		new_content_type = SEPARATE_DMA;
+	else
+		new_content_type = SEPARATE_SURFACE;
 
-	crtc     = set->crtc;
-	crtc->x  = set->x;
-	crtc->y  = set->y;
-	stdu     = vmw_crtc_to_stdu(crtc);
-	mode     = set->mode;
-	new_fb   = set->fb;
-	dev_priv = vmw_priv(crtc->dev);
-
-
-	if (set->num_connectors > 1) {
-		DRM_ERROR("Too many connectors\n");
-		return -EINVAL;
-	}
-
-	if (set->num_connectors == 1 &&
-	    set->connectors[0] != &stdu->base.connector) {
-		DRM_ERROR("Connectors don't match %p %p\n",
-			set->connectors[0], &stdu->base.connector);
-		return -EINVAL;
-	}
-
-
-	/* Since they always map one to one these are safe */
-	connector = &stdu->base.connector;
-	encoder   = &stdu->base.encoder;
-
-
-	/*
-	 * After this point the CRTC will be considered off unless a new fb
-	 * is bound
-	 */
-	if (stdu->defined) {
-		/* Unbind current surface by binding an invalid one */
-		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
-		if (unlikely(ret != 0))
-			return ret;
-
-		/* Update Screen Target, display will now be blank */
-		if (crtc->primary->fb) {
-			vmw_stdu_update_st(dev_priv, stdu);
-			if (unlikely(ret != 0))
-				return ret;
-		}
-
-		crtc->primary->fb  = NULL;
-		crtc->enabled      = false;
-		encoder->crtc      = NULL;
-		connector->encoder = NULL;
-
-		vmw_stdu_unpin_display(stdu);
-		stdu->content_fb      = NULL;
-		stdu->content_fb_type = SAME_AS_DISPLAY;
-
-		ret = vmw_stdu_destroy_st(dev_priv, stdu);
-		/* The hardware is hung, give up */
-		if (unlikely(ret != 0))
-			return ret;
-	}
-
-
-	/* Any of these conditions means the caller wants CRTC off */
-	if (set->num_connectors == 0 || !mode || !new_fb)
-		return 0;
-
-
-	if (set->x + mode->hdisplay > new_fb->width ||
-	    set->y + mode->vdisplay > new_fb->height) {
-		DRM_ERROR("Set outside of framebuffer\n");
-		return -EINVAL;
-	}
-
-	stdu->content_fb = new_fb;
-	vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
-
-	if (vfb->dmabuf)
-		stdu->content_fb_type = SEPARATE_DMA;
-
-	/*
-	 * If the requested mode is different than the width and height
-	 * of the FB or if the content buffer is a DMA buf, then allocate
-	 * a display FB that matches the dimension of the mode
-	 */
-	if (mode->hdisplay != new_fb->width  ||
-	    mode->vdisplay != new_fb->height ||
-	    stdu->content_fb_type != SAME_AS_DISPLAY) {
+	if (new_content_type != SAME_AS_DISPLAY &&
+	    !stdu->display_srf) {
 		struct vmw_surface content_srf;
 		struct drm_vmw_size display_base_size = {0};
-		struct vmw_surface *display_srf;
-
 
 		display_base_size.width  = mode->hdisplay;
 		display_base_size.height = mode->vdisplay;
@@ -521,7 +420,7 @@
 		 * If content buffer is a DMA buf, then we have to construct
 		 * surface info
 		 */
-		if (stdu->content_fb_type == SEPARATE_DMA) {
+		if (new_content_type == SEPARATE_DMA) {
 
 			switch (new_fb->bits_per_pixel) {
 			case 32:
@@ -538,17 +437,13 @@
 
 			default:
 				DRM_ERROR("Invalid format\n");
-				ret = -EINVAL;
-				goto err_unref_content;
+				return -EINVAL;
 			}
 
 			content_srf.flags             = 0;
 			content_srf.mip_levels[0]     = 1;
 			content_srf.multisample_count = 0;
 		} else {
-
-			stdu->content_fb_type = SEPARATE_SURFACE;
-
 			new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
 			content_srf = *new_vfbs->surface;
 		}
@@ -563,26 +458,136 @@
 				content_srf.multisample_count,
 				0,
 				display_base_size,
-				&display_srf);
+				&new_display_srf);
 		if (unlikely(ret != 0)) {
-			DRM_ERROR("Cannot allocate a display FB.\n");
-			goto err_unref_content;
+			DRM_ERROR("Could not allocate screen target surface.\n");
+			return ret;
 		}
-
-		stdu->display_srf = display_srf;
-	} else {
+	} else if (new_content_type == SAME_AS_DISPLAY) {
 		new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
-		stdu->display_srf = new_vfbs->surface;
+		new_display_srf = vmw_surface_reference(new_vfbs->surface);
 	}
 
+	if (new_display_srf) {
+		/* Pin new surface before flipping */
+		ret = vmw_resource_pin(&new_display_srf->res, false);
+		if (ret)
+			goto out_srf_unref;
 
-	ret = vmw_stdu_pin_display(stdu);
-	if (unlikely(ret != 0)) {
-		stdu->display_srf = NULL;
-		goto err_unref_content;
+		ret = vmw_stdu_bind_st(dev_priv, stdu, &new_display_srf->res);
+		if (ret)
+			goto out_srf_unpin;
+
+		/* Unpin and unreference old surface */
+		vmw_stdu_unpin_display(stdu);
+
+		/* Transfer the reference */
+		stdu->display_srf = new_display_srf;
+		new_display_srf = NULL;
 	}
 
-	vmw_svga_enable(dev_priv);
+	crtc->primary->fb = new_fb;
+	stdu->content_fb_type = new_content_type;
+	return 0;
+
+out_srf_unpin:
+	vmw_resource_unpin(&new_display_srf->res);
+out_srf_unref:
+	vmw_surface_unreference(&new_display_srf);
+	return ret;
+}
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set:  mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally updating the screen target.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_framebuffer *vfb;
+	struct vmw_screen_target_display_unit *stdu;
+	struct drm_display_mode *mode;
+	struct drm_framebuffer  *new_fb;
+	struct drm_crtc      *crtc;
+	struct drm_encoder   *encoder;
+	struct drm_connector *connector;
+	bool turning_off;
+	int    ret;
+
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	crtc     = set->crtc;
+	stdu     = vmw_crtc_to_stdu(crtc);
+	mode     = set->mode;
+	new_fb   = set->fb;
+	dev_priv = vmw_priv(crtc->dev);
+	turning_off = set->num_connectors == 0 || !mode || !new_fb;
+	vfb = (new_fb) ? vmw_framebuffer_to_vfb(new_fb) : NULL;
+
+	if (set->num_connectors > 1) {
+		DRM_ERROR("Too many connectors\n");
+		return -EINVAL;
+	}
+
+	if (set->num_connectors == 1 &&
+	    set->connectors[0] != &stdu->base.connector) {
+		DRM_ERROR("Connectors don't match %p %p\n",
+			set->connectors[0], &stdu->base.connector);
+		return -EINVAL;
+	}
+
+	if (!turning_off && (set->x + mode->hdisplay > new_fb->width ||
+			     set->y + mode->vdisplay > new_fb->height)) {
+		DRM_ERROR("Set outside of framebuffer\n");
+		return -EINVAL;
+	}
+
+	/* Only one active implicit frame-buffer at a time. */
+	if (!turning_off && stdu->base.is_implicit && dev_priv->implicit_fb &&
+	    !(dev_priv->num_implicit == 1 && stdu->base.active_implicit)
+	    && dev_priv->implicit_fb != vfb) {
+		DRM_ERROR("Multiple implicit framebuffers not supported.\n");
+		return -EINVAL;
+	}
+
+	/* Since they always map one to one these are safe */
+	connector = &stdu->base.connector;
+	encoder   = &stdu->base.encoder;
+
+	if (stdu->defined) {
+		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+		if (ret)
+			return ret;
+
+		vmw_stdu_unpin_display(stdu);
+		(void) vmw_stdu_update_st(dev_priv, stdu);
+		vmw_kms_del_active(dev_priv, &stdu->base);
+
+		ret = vmw_stdu_destroy_st(dev_priv, stdu);
+		if (ret)
+			return ret;
+
+		crtc->primary->fb = NULL;
+		crtc->enabled = false;
+		encoder->crtc = NULL;
+		connector->encoder = NULL;
+		stdu->content_fb_type = SAME_AS_DISPLAY;
+		crtc->x = set->x;
+		crtc->y = set->y;
+	}
+
+	if (turning_off)
+		return 0;
 
 	/*
 	 * Steps to displaying a surface, assume surface is already
@@ -592,35 +597,33 @@
 	 *   3.  update that screen target (this is done later by
 	 *       vmw_kms_stdu_do_surface_dirty_or_present)
 	 */
-	ret = vmw_stdu_define_st(dev_priv, stdu);
-	if (unlikely(ret != 0))
-		goto err_unpin_display_and_content;
+	/*
+	 * Note on error handling: We can't really restore the crtc to
+	 * its original state on error, but we at least update the
+	 * current state to what's submitted to hardware to enable
+	 * future recovery.
+	 */
+	vmw_svga_enable(dev_priv);
+	ret = vmw_stdu_define_st(dev_priv, stdu, mode, set->x, set->y);
+	if (ret)
+		return ret;
 
-	ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
-	if (unlikely(ret != 0))
-		goto err_unpin_destroy_st;
+	crtc->x = set->x;
+	crtc->y = set->y;
+	crtc->mode = *mode;
 
+	ret = vmw_stdu_bind_fb(dev_priv, crtc, mode, new_fb);
+	if (ret)
+		return ret;
 
+	vmw_kms_add_active(dev_priv, &stdu->base, vfb);
+	crtc->enabled = true;
 	connector->encoder = encoder;
 	encoder->crtc      = crtc;
 
-	crtc->mode    = *mode;
-	crtc->primary->fb = new_fb;
-	crtc->enabled = true;
-
-	return ret;
-
-err_unpin_destroy_st:
-	vmw_stdu_destroy_st(dev_priv, stdu);
-err_unpin_display_and_content:
-	vmw_stdu_unpin_display(stdu);
-err_unref_content:
-	stdu->content_fb = NULL;
-	return ret;
+	return 0;
 }
 
-
-
 /**
  * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
  *
@@ -648,59 +651,34 @@
 {
 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 	struct vmw_screen_target_display_unit *stdu;
+	struct drm_vmw_rect vclips;
+	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
 	int ret;
 
-	if (crtc == NULL)
-		return -EINVAL;
-
 	dev_priv          = vmw_priv(crtc->dev);
 	stdu              = vmw_crtc_to_stdu(crtc);
-	crtc->primary->fb = new_fb;
-	stdu->content_fb  = new_fb;
 
-	if (stdu->display_srf) {
-		/*
-		 * If the display surface is the same as the content surface
-		 * then remove the reference
-		 */
-		if (stdu->content_fb_type == SAME_AS_DISPLAY) {
-			if (stdu->defined) {
-				/* Unbind the current surface */
-				ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
-				if (unlikely(ret != 0))
-					goto err_out;
-			}
-			vmw_stdu_unpin_display(stdu);
-			stdu->display_srf = NULL;
-		}
-	}
+	if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
+		return -EINVAL;
 
+	ret = vmw_stdu_bind_fb(dev_priv, crtc, &crtc->mode, new_fb);
+	if (ret)
+		return ret;
 
-	if (!new_fb) {
-		/* Blanks the display */
-		(void) vmw_stdu_update_st(dev_priv, stdu);
+	if (stdu->base.is_implicit)
+		vmw_kms_update_implicit_fb(dev_priv, crtc);
 
-		return 0;
-	}
-
-
-	if (stdu->content_fb_type == SAME_AS_DISPLAY) {
-		stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
-		ret = vmw_stdu_pin_display(stdu);
-		if (ret) {
-			stdu->display_srf = NULL;
-			goto err_out;
-		}
-
-		/* Bind display surface */
-		ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
-		if (unlikely(ret != 0))
-			goto err_unpin_display_and_content;
-	}
-
-	/* Update display surface: after this point everything is bound */
-	ret = vmw_stdu_update_st(dev_priv, stdu);
-	if (unlikely(ret != 0))
+	vclips.x = crtc->x;
+	vclips.y = crtc->y;
+	vclips.w = crtc->mode.hdisplay;
+	vclips.h = crtc->mode.vdisplay;
+	if (vfb->dmabuf)
+		ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL, &vclips,
+				       1, 1, true, false);
+	else
+		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, &vclips,
+						 NULL, 0, 0, 1, 1, NULL);
+	if (ret)
 		return ret;
 
 	if (event) {
@@ -721,14 +699,7 @@
 		vmw_fifo_flush(dev_priv, false);
 	}
 
-	return ret;
-
-err_unpin_display_and_content:
-	vmw_stdu_unpin_display(stdu);
-err_out:
-	crtc->primary->fb = NULL;
-	stdu->content_fb = NULL;
-	return ret;
+	return 0;
 }
 
 
@@ -1138,7 +1109,7 @@
 	stdu->base.pref_active = (unit == 0);
 	stdu->base.pref_width  = dev_priv->initial_width;
 	stdu->base.pref_height = dev_priv->initial_height;
-	stdu->base.is_implicit = true;
+	stdu->base.is_implicit = false;
 
 	drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
 			   DRM_MODE_CONNECTOR_VIRTUAL);
@@ -1159,7 +1130,17 @@
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.dirty_info_property,
 				   1);
-
+	drm_object_attach_property(&connector->base,
+				   dev_priv->hotplug_mode_update_property, 1);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, 0);
+	if (dev_priv->implicit_placement_property)
+		drm_object_attach_property
+			(&connector->base,
+			 dev_priv->implicit_placement_property,
+			 stdu->base.is_implicit);
 	return 0;
 }
 
@@ -1224,6 +1205,8 @@
 
 	dev_priv->active_display_unit = vmw_du_screen_target;
 
+	vmw_kms_create_implicit_placement_property(dev_priv, false);
+
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
 		ret = vmw_stdu_init(dev_priv, i);
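
The vmw_stdu_bind_fb() path added above pins the new surface and binds it to the screen target before the old surface is released, so the target never points at an unpinned surface. A minimal sketch of that pin-new/bind/unpin-old ordering; all types and the surface_*()/hw_bind() helpers are hypothetical stand-ins for the vmwgfx internals (vmw_resource_pin(), vmw_stdu_bind_st(), vmw_stdu_unpin_display()):

struct surface;	/* opaque, stands in for struct vmw_surface */
struct display_unit { struct surface *current_srf; };

/* Trivial stubs standing in for the real pin/bind/unref primitives. */
static int  surface_pin(struct surface *s)                      { (void)s; return 0; }
static void surface_unpin(struct surface *s)                    { (void)s; }
static void surface_unref(struct surface **s)                   { *s = NULL; }
static int  hw_bind(struct display_unit *du, struct surface *s) { (void)du; (void)s; return 0; }

/* Consumes the caller's reference to @new_srf on both success and failure. */
static int bind_new_surface(struct display_unit *du, struct surface *new_srf)
{
	int ret;

	ret = surface_pin(new_srf);		/* pin before touching hardware */
	if (ret)
		goto out_unref;

	ret = hw_bind(du, new_srf);		/* repoint the screen target */
	if (ret)
		goto out_unpin;

	/* Only now is the old surface safe to release. */
	surface_unpin(du->current_srf);
	surface_unref(&du->current_srf);

	du->current_srf = new_srf;		/* transfer the reference */
	return 0;

out_unpin:
	surface_unpin(new_srf);
out_unref:
	surface_unref(&new_srf);
	return ret;
}
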
 
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index dd2dbb9..c27858a 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -83,8 +83,10 @@
 		if (of_match_node(driver->subdevs, np) &&
 		    of_device_is_available(np)) {
 			err = host1x_subdev_add(device, np);
-			if (err < 0)
+			if (err < 0) {
+				of_node_put(np);
 				return err;
+			}
 		}
 	}
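
The one-line host1x fix above closes a device-node refcount leak: for_each_child_of_node() holds a reference on the node it currently yields, so an early return out of the loop must drop that reference with of_node_put(). A sketch of the general pattern, with handle_child() as a hypothetical per-node callback:

#include <linux/of.h>

static int handle_child(struct device_node *np) { (void)np; return 0; } /* stand-in */

static int scan_children(struct device_node *parent)
{
	struct device_node *np;
	int err;

	for_each_child_of_node(parent, np) {
		err = handle_child(np);
		if (err < 0) {
			of_node_put(np);	/* balance the iterator's get */
			return err;
		}
	}
	return 0;	/* normal loop exit already dropped all references */
}
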
 
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index defa799..b4515d5 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -225,7 +225,7 @@
 	return 0;
 }
 
-static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
 {
 	int i = 0;
 	u32 last_page = ~0;
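
The do_relocs() signature change matters because the function can return negative errno values; declared unsigned, those wrap to large positive numbers and a caller's `if (err < 0)` check can never fire. A small user-space illustration of the hazard:

#include <stdio.h>

static unsigned int broken(void) { return -22; }	/* -EINVAL wraps to 4294967274 */
static int fixed(void)           { return -22; }

int main(void)
{
	/* unsigned < 0 is always false, so the error is silently dropped */
	printf("broken() < 0? %d\n", broken() < 0);	/* prints 0 */
	printf("fixed()  < 0? %d\n", fixed() < 0);	/* prints 1 */
	return 0;
}
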
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index d3ad534..2f29780 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -171,6 +171,7 @@
 		u32 bus_format, u32 width)
 {
 	struct ipu_dc_priv *priv = dc->priv;
+	int addr, sync;
 	u32 reg = 0;
 	int map;
 
@@ -182,41 +183,39 @@
 		return map;
 	}
 
+	/*
+	 * In interlaced mode we need more counters to create the asymmetric
+	 * per-field VSYNC signals. The pixel active signal synchronising DC
+	 * to DI moves to signal generator #6 (see ipu-di.c). In progressive
+	 * mode counter #5 is used.
+	 */
+	sync = interlaced ? 6 : 5;
+
+	/* Reserve 5 microcode template words for each DI */
+	if (dc->di)
+		addr = 5;
+	else
+		addr = 0;
+
 	if (interlaced) {
-		int addr;
-
-		if (dc->di)
-			addr = 1;
-		else
-			addr = 0;
-
 		dc_link_event(dc, DC_EVT_NL, addr, 3);
 		dc_link_event(dc, DC_EVT_EOL, addr, 2);
 		dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1);
 
 		/* Init template microcode */
-		dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, 6, 1);
+		dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
 	} else {
-		if (dc->di) {
-			dc_link_event(dc, DC_EVT_NL, 2, 3);
-			dc_link_event(dc, DC_EVT_EOL, 3, 2);
-			dc_link_event(dc, DC_EVT_NEW_DATA, 1, 1);
-			/* Init template microcode */
-			dc_write_tmpl(dc, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
-			dc_write_tmpl(dc, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
-			dc_write_tmpl(dc, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
-			dc_write_tmpl(dc, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
-		} else {
-			dc_link_event(dc, DC_EVT_NL, 5, 3);
-			dc_link_event(dc, DC_EVT_EOL, 6, 2);
-			dc_link_event(dc, DC_EVT_NEW_DATA, 8, 1);
-			/* Init template microcode */
-			dc_write_tmpl(dc, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
-			dc_write_tmpl(dc, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
-			dc_write_tmpl(dc, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1);
-			dc_write_tmpl(dc, 8, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
-		}
+		dc_link_event(dc, DC_EVT_NL, addr + 2, 3);
+		dc_link_event(dc, DC_EVT_EOL, addr + 3, 2);
+		dc_link_event(dc, DC_EVT_NEW_DATA, addr + 1, 1);
+
+		/* Init template microcode */
+		dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1);
+		dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0);
+		dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+		dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
 	}
+
 	dc_link_event(dc, DC_EVT_NF, 0, 0);
 	dc_link_event(dc, DC_EVT_NFIELD, 0, 0);
 	dc_link_event(dc, DC_EVT_EOF, 0, 0);
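
The rework above folds the four per-DI branches into one computed layout: each DI owns a bank of 5 template words (DI1 starting at word 5), progressive mode fills words base+1..base+4, interlaced mode only word base, and the sync wave comes from counter #5 (progressive) or #6 (interlaced). A print-only sketch of that addressing, with the values taken from the code above:

#include <stdbool.h>
#include <stdio.h>

static void show_layout(int di, bool interlaced)
{
	int base = di ? 5 : 0;		/* 5 template words reserved per DI */
	int sync = interlaced ? 6 : 5;	/* sync wave generator */

	if (interlaced)
		printf("DI%d interlaced: WROD @ word %d, sync #%d\n",
		       di, base, sync);
	else
		printf("DI%d progressive: WROD @ words %d,%d,%d, WRG @ %d, sync #%d\n",
		       di, base + 1, base + 2, base + 3, base + 4, sync);
}

int main(void)
{
	show_layout(0, false);	/* words 1-3 + WRG at 4, sync #5 */
	show_layout(1, true);	/* word 5, sync #6 */
	return 0;
}
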
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 665ab9f..cbd7c98 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -74,9 +74,17 @@
  * there can thus be up to three clients: Two vga clients (GPUs) and one audio
  * client (on the discrete GPU). The code is mostly prepared to support
  * machines with more than two GPUs should they become available.
+ *
  * The GPU to which the outputs are currently switched is called the
  * active client in vga_switcheroo parlance. The GPU not in use is the
- * inactive client.
+ * inactive client. When the inactive client's DRM driver is loaded,
+ * it will be unable to probe the panel's EDID and hence depends on
+ * VBIOS to provide its display modes. If the VBIOS modes are bogus or
+ * if there is no VBIOS at all (which is common on the MacBook Pro),
+ * a client may alternatively request that the DDC lines be temporarily
+ * switched to it, provided that the handler supports this. Switching
+ * only the DDC lines and not the entire output avoids unnecessary
+ * flickering.
  */
 
 /**
@@ -126,6 +134,10 @@
  * 	(counting only vga clients, not audio clients)
  * @clients: list of registered clients
  * @handler: registered handler
+ * @handler_flags: flags of registered handler
+ * @mux_hw_lock: protects mux state
+ *	(in particular while DDC lines are temporarily switched)
+ * @old_ddc_owner: client to which DDC lines will be switched back on unlock
  *
  * vga_switcheroo private data. Currently only one vga_switcheroo instance
  * per system is supported.
@@ -142,6 +154,9 @@
 	struct list_head clients;
 
 	const struct vga_switcheroo_handler *handler;
+	enum vga_switcheroo_handler_flags_t handler_flags;
+	struct mutex mux_hw_lock;
+	int old_ddc_owner;
 };
 
 #define ID_BIT_AUDIO		0x100
@@ -156,6 +171,7 @@
 /* only one switcheroo per system */
 static struct vgasr_priv vgasr_priv = {
 	.clients = LIST_HEAD_INIT(vgasr_priv.clients),
+	.mux_hw_lock = __MUTEX_INITIALIZER(vgasr_priv.mux_hw_lock),
 };
 
 static bool vga_switcheroo_ready(void)
@@ -190,13 +206,15 @@
 /**
  * vga_switcheroo_register_handler() - register handler
  * @handler: handler callbacks
+ * @handler_flags: handler flags
  *
  * Register handler. Enable vga_switcheroo if two vga clients have already
  * registered.
  *
  * Return: 0 on success, -EINVAL if a handler was already registered.
  */
-int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler)
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+				    enum vga_switcheroo_handler_flags_t handler_flags)
 {
 	mutex_lock(&vgasr_mutex);
 	if (vgasr_priv.handler) {
@@ -205,6 +223,7 @@
 	}
 
 	vgasr_priv.handler = handler;
+	vgasr_priv.handler_flags = handler_flags;
 	if (vga_switcheroo_ready()) {
 		pr_info("enabled\n");
 		vga_switcheroo_enable();
@@ -222,16 +241,33 @@
 void vga_switcheroo_unregister_handler(void)
 {
 	mutex_lock(&vgasr_mutex);
+	mutex_lock(&vgasr_priv.mux_hw_lock);
+	vgasr_priv.handler_flags = 0;
 	vgasr_priv.handler = NULL;
 	if (vgasr_priv.active) {
 		pr_info("disabled\n");
 		vga_switcheroo_debugfs_fini(&vgasr_priv);
 		vgasr_priv.active = false;
 	}
+	mutex_unlock(&vgasr_priv.mux_hw_lock);
 	mutex_unlock(&vgasr_mutex);
 }
 EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
 
+/**
+ * vga_switcheroo_handler_flags() - obtain handler flags
+ *
+ * Helper for clients to obtain the handler flags bitmask.
+ *
+ * Return: Handler flags. A value of 0 means that no handler is registered
+ * or that the handler has no special capabilities.
+ */
+enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void)
+{
+	return vgasr_priv.handler_flags;
+}
+EXPORT_SYMBOL(vga_switcheroo_handler_flags);
+
 static int register_client(struct pci_dev *pdev,
 			   const struct vga_switcheroo_client_ops *ops,
 			   enum vga_switcheroo_client_id id, bool active,
@@ -413,6 +449,76 @@
 EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
 
 /**
+ * vga_switcheroo_lock_ddc() - temporarily switch DDC lines to a given client
+ * @pdev: client pci device
+ *
+ * Temporarily switch DDC lines to the client identified by @pdev
+ * (but leave the outputs otherwise switched to where they are).
+ * This allows the inactive client to probe EDID. The DDC lines must
+ * afterwards be switched back by calling vga_switcheroo_unlock_ddc(),
+ * even if this function returns an error.
+ *
+ * Return: Previous DDC owner on success or a negative int on error.
+ * Specifically, %-ENODEV if no handler has registered or if the handler
+ * does not support switching the DDC lines. Also, a negative value
+ * returned by the handler is propagated back to the caller.
+ * The return value has merely an informational purpose for any caller
+ * which might be interested in it. It is acceptable to ignore the return
+ * value and simply rely on the result of the subsequent EDID probe,
+ * which will be %NULL if DDC switching failed.
+ */
+int vga_switcheroo_lock_ddc(struct pci_dev *pdev)
+{
+	enum vga_switcheroo_client_id id;
+
+	mutex_lock(&vgasr_priv.mux_hw_lock);
+	if (!vgasr_priv.handler || !vgasr_priv.handler->switch_ddc) {
+		vgasr_priv.old_ddc_owner = -ENODEV;
+		return -ENODEV;
+	}
+
+	id = vgasr_priv.handler->get_client_id(pdev);
+	vgasr_priv.old_ddc_owner = vgasr_priv.handler->switch_ddc(id);
+	return vgasr_priv.old_ddc_owner;
+}
+EXPORT_SYMBOL(vga_switcheroo_lock_ddc);
+
+/**
+ * vga_switcheroo_unlock_ddc() - switch DDC lines back to previous owner
+ * @pdev: client pci device
+ *
+ * Switch DDC lines back to the previous owner after calling
+ * vga_switcheroo_lock_ddc(). This must be called even if
+ * vga_switcheroo_lock_ddc() returned an error.
+ *
+ * Return: Previous DDC owner on success (i.e. the client identifier of @pdev)
+ * or a negative int on error.
+ * Specifically, %-ENODEV if no handler has registered or if the handler
+ * does not support switching the DDC lines. Also, a negative value
+ * returned by the handler is propagated back to the caller.
+ * Finally, invoking this function without calling vga_switcheroo_lock_ddc()
+ * first is not allowed and will result in %-EINVAL.
+ */
+int vga_switcheroo_unlock_ddc(struct pci_dev *pdev)
+{
+	enum vga_switcheroo_client_id id;
+	int ret = vgasr_priv.old_ddc_owner;
+
+	if (WARN_ON_ONCE(!mutex_is_locked(&vgasr_priv.mux_hw_lock)))
+		return -EINVAL;
+
+	if (vgasr_priv.old_ddc_owner >= 0) {
+		id = vgasr_priv.handler->get_client_id(pdev);
+		if (vgasr_priv.old_ddc_owner != id)
+			ret = vgasr_priv.handler->switch_ddc(
+						     vgasr_priv.old_ddc_owner);
+	}
+	mutex_unlock(&vgasr_priv.mux_hw_lock);
+	return ret;
+}
+EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
+
+/**
  * DOC: Manual switching and manual power control
  *
  * In this mode of use, the file /sys/kernel/debug/vgaswitcheroo/switch
@@ -549,7 +655,9 @@
 		console_unlock();
 	}
 
+	mutex_lock(&vgasr_priv.mux_hw_lock);
 	ret = vgasr_priv.handler->switchto(new_client->id);
+	mutex_unlock(&vgasr_priv.mux_hw_lock);
 	if (ret)
 		return ret;
 
@@ -664,7 +772,9 @@
 	vgasr_priv.delayed_switch_active = false;
 
 	if (just_mux) {
+		mutex_lock(&vgasr_priv.mux_hw_lock);
 		ret = vgasr_priv.handler->switchto(client_id);
+		mutex_unlock(&vgasr_priv.mux_hw_lock);
 		goto out;
 	}
 
@@ -876,8 +986,11 @@
 	if (ret)
 		return ret;
 	mutex_lock(&vgasr_mutex);
-	if (vgasr_priv.handler->switchto)
+	if (vgasr_priv.handler->switchto) {
+		mutex_lock(&vgasr_priv.mux_hw_lock);
 		vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
+		mutex_unlock(&vgasr_priv.mux_hw_lock);
+	}
 	vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
 	mutex_unlock(&vgasr_mutex);
 	return 0;
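
The lock/unlock pair is intended to bracket an EDID probe on the inactive GPU. A hedged sketch of how a helper along the lines of drm_get_edid_switcheroo() might use it; per the kerneldoc above, the lock's return value may be ignored in favour of the probe result:

#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc.h>

/* Probe EDID over DDC lines temporarily muxed to this client's GPU. */
static struct edid *edid_probe_switcheroo(struct drm_connector *connector,
					  struct i2c_adapter *adapter,
					  struct pci_dev *pdev)
{
	struct edid *edid;

	/* Mux DDC to us; on failure drm_get_edid() simply sees nothing. */
	vga_switcheroo_lock_ddc(pdev);
	edid = drm_get_edid(connector, adapter);
	/* Must be called even if the lock call failed. */
	vga_switcheroo_unlock_ddc(pdev);

	return edid;
}
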
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index f236250..4034d2d 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -19,6 +19,7 @@
 #include <linux/acpi.h>
 #include <linux/pnp.h>
 #include <linux/apple_bl.h>
+#include <linux/apple-gmux.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
@@ -57,7 +58,9 @@
 	/* switcheroo data */
 	acpi_handle dhandle;
 	int gpe;
-	enum vga_switcheroo_client_id resume_client_id;
+	enum vga_switcheroo_client_id switch_state_display;
+	enum vga_switcheroo_client_id switch_state_ddc;
+	enum vga_switcheroo_client_id switch_state_external;
 	enum vga_switcheroo_state power_state;
 	struct completion powerchange_done;
 };
@@ -368,21 +371,72 @@
  * for the selected GPU.
  */
 
+static void gmux_read_switch_state(struct apple_gmux_data *gmux_data)
+{
+	if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DDC) == 1)
+		gmux_data->switch_state_ddc = VGA_SWITCHEROO_IGD;
+	else
+		gmux_data->switch_state_ddc = VGA_SWITCHEROO_DIS;
+
+	if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2)
+		gmux_data->switch_state_display = VGA_SWITCHEROO_IGD;
+	else
+		gmux_data->switch_state_display = VGA_SWITCHEROO_DIS;
+
+	if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL) == 2)
+		gmux_data->switch_state_external = VGA_SWITCHEROO_IGD;
+	else
+		gmux_data->switch_state_external = VGA_SWITCHEROO_DIS;
+}
+
+static void gmux_write_switch_state(struct apple_gmux_data *gmux_data)
+{
+	if (gmux_data->switch_state_ddc == VGA_SWITCHEROO_IGD)
+		gmux_write8(gmux_data, GMUX_PORT_SWITCH_DDC, 1);
+	else
+		gmux_write8(gmux_data, GMUX_PORT_SWITCH_DDC, 2);
+
+	if (gmux_data->switch_state_display == VGA_SWITCHEROO_IGD)
+		gmux_write8(gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2);
+	else
+		gmux_write8(gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3);
+
+	if (gmux_data->switch_state_external == VGA_SWITCHEROO_IGD)
+		gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2);
+	else
+		gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
+}
+
 static int gmux_switchto(enum vga_switcheroo_client_id id)
 {
-	if (id == VGA_SWITCHEROO_IGD) {
-		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1);
-		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2);
-		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2);
-	} else {
-		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2);
-		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3);
-		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
-	}
+	apple_gmux_data->switch_state_ddc = id;
+	apple_gmux_data->switch_state_display = id;
+	apple_gmux_data->switch_state_external = id;
+
+	gmux_write_switch_state(apple_gmux_data);
 
 	return 0;
 }
 
+static int gmux_switch_ddc(enum vga_switcheroo_client_id id)
+{
+	enum vga_switcheroo_client_id old_ddc_owner =
+		apple_gmux_data->switch_state_ddc;
+
+	if (id == old_ddc_owner)
+		return id;
+
+	pr_debug("Switching DDC from %d to %d\n", old_ddc_owner, id);
+	apple_gmux_data->switch_state_ddc = id;
+
+	if (id == VGA_SWITCHEROO_IGD)
+		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1);
+	else
+		gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2);
+
+	return old_ddc_owner;
+}
+
 /**
  * DOC: Power control
  *
@@ -440,21 +494,19 @@
 		return VGA_SWITCHEROO_DIS;
 }
 
-static enum vga_switcheroo_client_id
-gmux_active_client(struct apple_gmux_data *gmux_data)
-{
-	if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2)
-		return VGA_SWITCHEROO_IGD;
-
-	return VGA_SWITCHEROO_DIS;
-}
-
-static const struct vga_switcheroo_handler gmux_handler = {
+static const struct vga_switcheroo_handler gmux_handler_indexed = {
 	.switchto = gmux_switchto,
 	.power_state = gmux_set_power_state,
 	.get_client_id = gmux_get_client_id,
 };
 
+static const struct vga_switcheroo_handler gmux_handler_classic = {
+	.switchto = gmux_switchto,
+	.switch_ddc = gmux_switch_ddc,
+	.power_state = gmux_set_power_state,
+	.get_client_id = gmux_get_client_id,
+};
+
 /**
  * DOC: Interrupt
  *
@@ -513,7 +565,6 @@
 	struct pnp_dev *pnp = to_pnp_dev(dev);
 	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
 
-	gmux_data->resume_client_id = gmux_active_client(gmux_data);
 	gmux_disable_interrupts(gmux_data);
 	return 0;
 }
@@ -524,7 +575,7 @@
 	struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
 
 	gmux_enable_interrupts(gmux_data);
-	gmux_switchto(gmux_data->resume_client_id);
+	gmux_write_switch_state(gmux_data);
 	if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
 		gmux_set_discrete_state(gmux_data, gmux_data->power_state);
 	return 0;
@@ -704,9 +755,23 @@
 	apple_gmux_data = gmux_data;
 	init_completion(&gmux_data->powerchange_done);
 	gmux_enable_interrupts(gmux_data);
+	gmux_read_switch_state(gmux_data);
 
-	if (vga_switcheroo_register_handler(&gmux_handler)) {
-		ret = -ENODEV;
+	/*
+	 * Retina MacBook Pros cannot switch the panel's AUX separately
+	 * and need eDP pre-calibration. They are distinguishable from
+	 * pre-retinas by having an "indexed" gmux.
+	 *
+	 * Pre-retina MacBook Pros can switch the panel's DDC separately.
+	 */
+	if (gmux_data->indexed)
+		ret = vga_switcheroo_register_handler(&gmux_handler_indexed,
+					      VGA_SWITCHEROO_NEEDS_EDP_CONFIG);
+	else
+		ret = vga_switcheroo_register_handler(&gmux_handler_classic,
+					      VGA_SWITCHEROO_CAN_SWITCH_DDC);
+	if (ret) {
+		pr_err("Failed to register vga_switcheroo handler\n");
 		goto err_register_handler;
 	}
 
@@ -764,7 +829,7 @@
 }
 
 static const struct pnp_device_id gmux_device_ids[] = {
-	{"APP000B", 0},
+	{GMUX_ACPI_HID, 0},
 	{"", 0}
 };
 
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 1c872bd..bceb813 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1123,8 +1123,7 @@
 {
 }
 
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
-					size_t len,
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 					enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
@@ -1142,8 +1141,7 @@
 	return PTR_ERR_OR_ZERO(vaddr);
 }
 
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
-				       size_t len,
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 				       enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index b8dcf5a..da34bc12 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -109,7 +109,7 @@
 	if (offset > dma_buf->size || size > dma_buf->size - offset)
 		return -EINVAL;
 
-	ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+	ret = dma_buf_begin_cpu_access(dma_buf, dir);
 	if (ret)
 		return ret;
 
@@ -139,7 +139,7 @@
 		copy_offset = 0;
 	}
 err:
-	dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+	dma_buf_end_cpu_access(dma_buf, dir);
 	return ret;
 }
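
With the start/len parameters removed, callers now bracket CPU access to the whole buffer with just a DMA direction. A minimal sketch of the updated calling convention; the helper itself is illustrative:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

static int touch_buffer(struct dma_buf *buf)
{
	void *vaddr;
	int ret;

	/* Invalidate CPU caches for the whole buffer; ranges are gone. */
	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(buf, 0);	/* CPU mapping of page 0 */
	if (vaddr) {
		/* ... read or modify the page ... */
		dma_buf_kunmap(buf, 0, vaddr);
	}

	/* Flush caches back; end_cpu_access also takes only the direction. */
	dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
	return 0;
}
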
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.h b/drivers/video/fbdev/omap2/omapfb/dss/dispc.h
index 4837442..e014d04 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.h
@@ -915,4 +915,5 @@
 		return 0;
 	}
 }
+
 #endif
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index b9066af..0184a84 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -412,6 +412,44 @@
 int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
 		bool mem_to_mem, const struct omap_video_timings *timings);
 
+u32 dispc_read_irqstatus(void);
+void dispc_clear_irqstatus(u32 mask);
+u32 dispc_read_irqenable(void);
+void dispc_write_irqenable(u32 mask);
+
+int dispc_request_irq(irq_handler_t handler, void *dev_id);
+void dispc_free_irq(void *dev_id);
+
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
+
+void dispc_mgr_enable(enum omap_channel channel, bool enable);
+bool dispc_mgr_is_enabled(enum omap_channel channel);
+u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
+u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
+bool dispc_mgr_go_busy(enum omap_channel channel);
+void dispc_mgr_go(enum omap_channel channel);
+void dispc_mgr_set_lcd_config(enum omap_channel channel,
+		const struct dss_lcd_mgr_config *config);
+void dispc_mgr_set_timings(enum omap_channel channel,
+		const struct omap_video_timings *timings);
+void dispc_mgr_setup(enum omap_channel channel,
+		const struct omap_overlay_manager_info *info);
+
+int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
+		const struct omap_overlay_info *oi,
+		const struct omap_video_timings *timings,
+		int *x_predecim, int *y_predecim);
+
+int dispc_ovl_enable(enum omap_plane plane, bool enable);
+bool dispc_ovl_enabled(enum omap_plane plane);
+void dispc_ovl_set_channel_out(enum omap_plane plane,
+		enum omap_channel channel);
+int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
+		bool replication, const struct omap_video_timings *mgr_timings,
+		bool mem_to_mem);
+
 /* VENC */
 int venc_init_platform_driver(void) __init;
 void venc_uninit_platform_driver(void);
@@ -465,4 +503,44 @@
 		const struct dss_pll_clock_info *cinfo);
 int dss_pll_wait_reset_done(struct dss_pll *pll);
 
+/* compat */
+
+struct dss_mgr_ops {
+	int (*connect)(struct omap_overlay_manager *mgr,
+		struct omap_dss_device *dst);
+	void (*disconnect)(struct omap_overlay_manager *mgr,
+		struct omap_dss_device *dst);
+
+	void (*start_update)(struct omap_overlay_manager *mgr);
+	int (*enable)(struct omap_overlay_manager *mgr);
+	void (*disable)(struct omap_overlay_manager *mgr);
+	void (*set_timings)(struct omap_overlay_manager *mgr,
+			const struct omap_video_timings *timings);
+	void (*set_lcd_config)(struct omap_overlay_manager *mgr,
+			const struct dss_lcd_mgr_config *config);
+	int (*register_framedone_handler)(struct omap_overlay_manager *mgr,
+			void (*handler)(void *), void *data);
+	void (*unregister_framedone_handler)(struct omap_overlay_manager *mgr,
+			void (*handler)(void *), void *data);
+};
+
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
+void dss_uninstall_mgr_ops(void);
+
+int dss_mgr_connect(struct omap_overlay_manager *mgr,
+		struct omap_dss_device *dst);
+void dss_mgr_disconnect(struct omap_overlay_manager *mgr,
+		struct omap_dss_device *dst);
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+		const struct omap_video_timings *timings);
+void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+		const struct dss_lcd_mgr_config *config);
+int dss_mgr_enable(struct omap_overlay_manager *mgr);
+void dss_mgr_disable(struct omap_overlay_manager *mgr);
+void dss_mgr_start_update(struct omap_overlay_manager *mgr);
+int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr,
+		void (*handler)(void *), void *data);
+void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
+		void (*handler)(void *), void *data);
+
 #endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d7162cf..3c8422c 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -283,6 +283,7 @@
 struct drm_pending_event {
 	struct drm_event *event;
 	struct list_head link;
+	struct list_head pending_link;
 	struct drm_file *file_priv;
 	pid_t pid; /* pid of requester, no guarantee it's valid by the time
 		      we deliver the event, for tracing only */
@@ -346,6 +347,7 @@
 	struct list_head blobs;
 
 	wait_queue_head_t event_wait;
+	struct list_head pending_event_list;
 	struct list_head event_list;
 	int event_space;
 
@@ -919,15 +921,25 @@
 			     unsigned int cmd, unsigned long arg);
 extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
 
-				/* Device support (drm_fops.h) */
-extern int drm_open(struct inode *inode, struct file *filp);
-extern ssize_t drm_read(struct file *filp, char __user *buffer,
-			size_t count, loff_t *offset);
-extern int drm_release(struct inode *inode, struct file *filp);
-extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
-
-				/* Mapping support (drm_vm.h) */
-extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+/* File Operations (drm_fops.c) */
+int drm_open(struct inode *inode, struct file *filp);
+ssize_t drm_read(struct file *filp, char __user *buffer,
+		 size_t count, loff_t *offset);
+int drm_release(struct inode *inode, struct file *filp);
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+int drm_event_reserve_init_locked(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  struct drm_pending_event *p,
+				  struct drm_event *e);
+int drm_event_reserve_init(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   struct drm_pending_event *p,
+			   struct drm_event *e);
+void drm_event_cancel_free(struct drm_device *dev,
+			   struct drm_pending_event *p);
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
 
 /* Misc. IOCTL support (drm_ioctl.c) */
 int drm_noop(struct drm_device *dev, void *data,
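
The new helpers split event delivery into a reserve step, which charges the file's event space, and a send step that can run later, e.g. from the interrupt or flip-done path. A hedged sketch of a driver queueing a custom event; struct my_event and the type value are hypothetical:

#include <drm/drmP.h>
#include <linux/slab.h>

struct my_event {
	struct drm_pending_event base;
	struct drm_event event;
};

static int my_queue_event(struct drm_device *dev, struct drm_file *file_priv)
{
	struct my_event *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = 0x80000001;	/* hypothetical event type */
	e->event.length = sizeof(e->event);

	/* Charges file_priv->event_space and tracks the pending event. */
	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
	if (ret) {
		kfree(e);
		return ret;
	}

	/* Later, e.g. on completion; takes dev->event_lock internally. */
	drm_send_event(dev, &e->base);
	return 0;
}
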
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index fe5efad..9054598c 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -146,6 +146,9 @@
 					    struct drm_connector_state *state);
 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
 					  struct drm_connector_state *state);
+void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+					u16 *red, u16 *green, u16 *blue,
+					uint32_t start, uint32_t size);
 
 /**
  * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c5b4b81..e0170bf 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -305,12 +305,20 @@
  * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
  * @active_changed: crtc_state->active has been toggled.
  * @connectors_changed: connectors to this crtc have been updated
+ * @color_mgmt_changed: color management properties have changed (degamma or
+ *	gamma LUT or CSC matrix)
  * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
  * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
+ * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
  * @last_vblank_count: for helpers and drivers to capture the vblank of the
  * 	update to ensure framebuffer cleanup isn't done too early
  * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
  * @mode: current mode timings
+ * @degamma_lut: Lookup table for converting framebuffer pixel data
+ *	before applying the conversion matrix
+ * @ctm: Transformation matrix
+ * @gamma_lut: Lookup table for converting pixel data after the
+ *	conversion matrix
  * @event: optional pointer to a DRM event to signal upon completion of the
  * 	state update
  * @state: backpointer to global drm_atomic_state
@@ -332,6 +340,7 @@
 	bool mode_changed : 1;
 	bool active_changed : 1;
 	bool connectors_changed : 1;
+	bool color_mgmt_changed : 1;
 
 	/* attached planes bitmask:
 	 * WARNING: transitional helpers do not maintain plane_mask so
@@ -341,6 +350,7 @@
 	u32 plane_mask;
 
 	u32 connector_mask;
+	u32 encoder_mask;
 
 	/* last_vblank_count: for vblank waits before cleanup */
 	u32 last_vblank_count;
@@ -353,6 +363,11 @@
 	/* blob property to expose current mode to atomic userspace */
 	struct drm_property_blob *mode_blob;
 
+	/* blob property to expose color management to userspace */
+	struct drm_property_blob *degamma_lut;
+	struct drm_property_blob *ctm;
+	struct drm_property_blob *gamma_lut;
+
 	struct drm_pending_vblank_event *event;
 
 	struct drm_atomic_state *state;
@@ -755,7 +770,7 @@
 	int x, y;
 	const struct drm_crtc_funcs *funcs;
 
-	/* CRTC gamma size for reporting to userspace */
+	/* Legacy FB CRTC gamma size for reporting to userspace */
 	uint32_t gamma_size;
 	uint16_t *gamma_store;
 
@@ -1582,6 +1597,8 @@
 	 *
 	 * The bridge can assume that the display pipe (i.e. clocks and timing
 	 * signals) feeding it is still running when this callback is called.
+	 *
+	 * The disable callback is optional.
 	 */
 	void (*disable)(struct drm_bridge *bridge);
 
@@ -1598,6 +1615,8 @@
 	 * The bridge must assume that the display pipe (i.e. clocks and timing
 * signals) feeding it is no longer running when this callback is
 	 * called.
+	 *
+	 * The post_disable callback is optional.
 	 */
 	void (*post_disable)(struct drm_bridge *bridge);
 
@@ -1626,6 +1645,8 @@
 	 * will not yet be running when this callback is called. The bridge must
 	 * not enable the display link feeding the next bridge in the chain (if
 	 * there is one) when this callback is called.
+	 *
+	 * The pre_enable callback is optional.
 	 */
 	void (*pre_enable)(struct drm_bridge *bridge);
 
@@ -1643,6 +1664,8 @@
 	 * signals) feeding it is running when this callback is called. This
 	 * callback must enable the display link feeding the next bridge in the
 	 * chain if there is one.
+	 *
+	 * The enable callback is optional.
 	 */
 	void (*enable)(struct drm_bridge *bridge);
 };
@@ -1675,6 +1698,7 @@
  * @dev: parent DRM device
  * @allow_modeset: allow full modeset
  * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
+ * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
  * @planes: pointer to array of plane pointers
  * @plane_states: pointer to array of plane states pointers
  * @crtcs: pointer to array of CRTC pointers
@@ -1688,6 +1712,7 @@
 	struct drm_device *dev;
 	bool allow_modeset : 1;
 	bool legacy_cursor_update : 1;
+	bool legacy_set_config : 1;
 	struct drm_plane **planes;
 	struct drm_plane_state **plane_states;
 	struct drm_crtc **crtcs;
@@ -2024,6 +2049,15 @@
  * @property_blob_list: list of all the blob property objects
  * @blob_lock: mutex for blob property allocation and management
  * @*_property: core property tracking
+ * @degamma_lut_property: LUT used to convert the framebuffer's colors to linear
+ *	gamma
+ * @degamma_lut_size_property: size of the degamma LUT as supported by the
+ *	driver (read-only)
+ * @ctm_property: Matrix used to convert colors after the lookup in the
+ *	degamma LUT
+ * @gamma_lut_property: LUT used to convert the colors, after the CSC matrix, to
+ *	the gamma space of the connected screen
+ * @gamma_lut_size_property: size of the gamma LUT as supported by the
+ *	driver (read-only)
 * @preferred_depth: preferred RGB pixel depth, used by fb helpers
  * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
  * @async_page_flip: does this device support async flips on the primary plane?
@@ -2126,6 +2160,13 @@
 	struct drm_property *aspect_ratio_property;
 	struct drm_property *dirty_info_property;
 
+	/* Optional color correction properties */
+	struct drm_property *degamma_lut_property;
+	struct drm_property *degamma_lut_size_property;
+	struct drm_property *ctm_property;
+	struct drm_property *gamma_lut_property;
+	struct drm_property *gamma_lut_size_property;
+
 	/* properties for virtual machine layout */
 	struct drm_property *suggested_x_property;
 	struct drm_property *suggested_y_property;
@@ -2155,6 +2196,17 @@
 	list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
 		for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
 
+/**
+ * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ * @encoder_mask: bitmask of encoder indices
+ *
+ * Iterate over all encoders specified by bitmask.
+ */
+#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
+	list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
+		for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
 
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
 #define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -2231,6 +2283,7 @@
 		     struct drm_encoder *encoder,
 		     const struct drm_encoder_funcs *funcs,
 		     int encoder_type, const char *name, ...);
+extern unsigned int drm_encoder_index(struct drm_encoder *encoder);
 
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -2288,6 +2341,8 @@
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 struct i2c_adapter *adapter);
+extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+					    struct i2c_adapter *adapter);
 extern struct edid *drm_edid_duplicate(const struct edid *edid);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_config_init(struct drm_device *dev);
@@ -2488,6 +2543,8 @@
 extern int drm_format_plane_cpp(uint32_t format, int plane);
 extern int drm_format_horz_chroma_subsampling(uint32_t format);
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
+extern int drm_format_plane_width(int width, uint32_t format, int plane);
+extern int drm_format_plane_height(int height, uint32_t format, int plane);
 extern const char *drm_get_format_name(uint32_t format);
 extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
 							      unsigned int supported_rotations);
@@ -2536,6 +2593,21 @@
 	return mo ? obj_to_property(mo) : NULL;
 }
 
+/*
+ * Extract a degamma/gamma LUT value provided by user and round it to the
+ * precision supported by the hardware.
+ */
+static inline uint32_t drm_color_lut_extract(uint32_t user_input,
+					     uint32_t bit_precision)
+{
+	uint32_t val = user_input + (1 << (16 - bit_precision - 1));
+	uint32_t max = 0xffff >> (16 - bit_precision);
+
+	val >>= 16 - bit_precision;
+
+	return clamp_val(val, 0, max);
+}
+
 /* Plane list iterator for legacy (overlay only) planes. */
 #define drm_for_each_legacy_plane(plane, dev) \
 	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
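
drm_color_lut_extract() rounds half up by adding half a hardware step before shifting, then clamps so that inputs near 0xffff cannot overflow past the hardware maximum. A user-space re-check of the arithmetic for 10-bit precision (clamp_val() replaced by an open-coded clamp):

#include <stdint.h>
#include <stdio.h>

static uint32_t lut_extract(uint32_t in, uint32_t bits)
{
	uint32_t val = in + (1u << (16 - bits - 1));	/* round half up */
	uint32_t max = 0xffff >> (16 - bits);		/* 0x3ff for 10 bits */

	val >>= 16 - bits;
	return val > max ? max : val;
}

int main(void)
{
	printf("0x8000 @ 10 bit -> 0x%x\n", lut_extract(0x8000, 10)); /* 0x200 */
	printf("0xffff @ 10 bit -> 0x%x\n", lut_extract(0xffff, 10)); /* 0x3ff */
	return 0;
}
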
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 4b37afa..97fa894 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -48,6 +48,9 @@
 				     struct drm_display_mode *mode,
 				     int x, int y,
 				     struct drm_framebuffer *old_fb);
+extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+					      int degamma_lut_size,
+					      int gamma_lut_size);
 extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
 extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
diff --git a/include/drm/drm_dp_aux_dev.h b/include/drm/drm_dp_aux_dev.h
new file mode 100644
index 0000000..1b76d99
--- /dev/null
+++ b/include/drm/drm_dp_aux_dev.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rafael Antognolli <rafael.antognolli@intel.com>
+ *
+ */
+
+#ifndef DRM_DP_AUX_DEV
+#define DRM_DP_AUX_DEV
+
+#include <drm/drm_dp_helper.h>
+
+#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+
+int drm_dp_aux_dev_init(void);
+void drm_dp_aux_dev_exit(void);
+int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
+
+#else
+
+static inline int drm_dp_aux_dev_init(void)
+{
+	return 0;
+}
+
+static inline void drm_dp_aux_dev_exit(void)
+{
+}
+
+static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
+{
+	return 0;
+}
+
+static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
+#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index d8a40df..062723b 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -219,6 +219,7 @@
 };
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fb_helper_modinit(void);
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 			   const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
@@ -283,6 +284,11 @@
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector);
 #else
+static inline int drm_fb_helper_modinit(void)
+{
+	return 0;
+}
+
 static inline void drm_fb_helper_prepare(struct drm_device *dev,
 					struct drm_fb_helper *helper,
 					const struct drm_fb_helper_funcs *funcs)
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 1b3b1f8..7a9840f 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -96,14 +96,17 @@
  * struct mipi_dsi_host - DSI host device
  * @dev: driver model device node for this DSI host
  * @ops: DSI host operations
+ * @list: list management
  */
 struct mipi_dsi_host {
 	struct device *dev;
 	const struct mipi_dsi_host_ops *ops;
+	struct list_head list;
 };
 
 int mipi_dsi_host_register(struct mipi_dsi_host *host);
 void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
 
 /* DSI mode flags */
 
@@ -139,10 +142,28 @@
 	MIPI_DSI_FMT_RGB565,
 };
 
+#define DSI_DEV_NAME_SIZE		20
+
+/**
+ * struct mipi_dsi_device_info - template for creating a mipi_dsi_device
+ * @type: DSI peripheral chip type
+ * @channel: DSI virtual channel assigned to peripheral
+ * @node: pointer to OF device node or NULL
+ *
+ * This is populated and passed to mipi_dsi_device_register_full() to
+ * create a new DSI device.
+ */
+struct mipi_dsi_device_info {
+	char type[DSI_DEV_NAME_SIZE];
+	u32 channel;
+	struct device_node *node;
+};
+
 /**
  * struct mipi_dsi_device - DSI peripheral device
  * @host: DSI host for this peripheral
  * @dev: driver model device node for this peripheral
+ * @name: DSI peripheral chip type
  * @channel: virtual channel assigned to the peripheral
  * @format: pixel format for video mode
  * @lanes: number of active data lanes
@@ -152,6 +173,7 @@
 	struct mipi_dsi_host *host;
 	struct device dev;
 
+	char name[DSI_DEV_NAME_SIZE];
 	unsigned int channel;
 	unsigned int lanes;
 	enum mipi_dsi_pixel_format format;
@@ -188,6 +210,10 @@
 	return -EINVAL;
 }
 
+struct mipi_dsi_device *
+mipi_dsi_device_register_full(struct mipi_dsi_host *host,
+			      const struct mipi_dsi_device_info *info);
+void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
 struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
 int mipi_dsi_attach(struct mipi_dsi_device *dsi);
 int mipi_dsi_detach(struct mipi_dsi_device *dsi);
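
A host driver fills in the template and hands it to mipi_dsi_device_register_full() to instantiate a peripheral. A minimal sketch; the chip type name and channel are made up:

#include <drm/drm_mipi_dsi.h>

static struct mipi_dsi_device *attach_panel(struct mipi_dsi_host *host,
					    struct device_node *panel_node)
{
	const struct mipi_dsi_device_info info = {
		.type = "example-panel",	/* hypothetical chip type */
		.channel = 0,			/* DSI virtual channel */
		.node = panel_node,		/* OF node, may be NULL */
	};

	return mipi_dsi_device_register_full(host, &info);
}
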
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index a126a0d..b61c2d4 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -439,7 +439,7 @@
 	 * can be modified by this callback and does not need to match mode.
 	 *
 	 * This function is used by both legacy CRTC helpers and atomic helpers.
-	 * With atomic helpers it is optional.
+	 * This hook is optional.
 	 *
 	 * NOTE:
 	 *
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 8544665..3fd87b3 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -1,9 +1,12 @@
 #ifndef __DRM_OF_H__
 #define __DRM_OF_H__
 
+#include <linux/of_graph.h>
+
 struct component_master_ops;
 struct device;
 struct drm_device;
+struct drm_encoder;
 struct device_node;
 
 #ifdef CONFIG_OF
@@ -12,6 +15,9 @@
 extern int drm_of_component_probe(struct device *dev,
 				  int (*compare_of)(struct device *, void *),
 				  const struct component_master_ops *m_ops);
+extern int drm_of_encoder_active_endpoint(struct device_node *node,
+					  struct drm_encoder *encoder,
+					  struct of_endpoint *endpoint);
 #else
 static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
 						  struct device_node *port)
@@ -26,6 +32,33 @@
 {
 	return -EINVAL;
 }
+
+static inline int drm_of_encoder_active_endpoint(struct device_node *node,
+						 struct drm_encoder *encoder,
+						 struct of_endpoint *endpoint)
+{
+	return -EINVAL;
+}
 #endif
 
+static inline int drm_of_encoder_active_endpoint_id(struct device_node *node,
+						    struct drm_encoder *encoder)
+{
+	struct of_endpoint endpoint;
+	int ret = drm_of_encoder_active_endpoint(node, encoder,
+						 &endpoint);
+
+	return ret ?: endpoint.id;
+}
+
+static inline int drm_of_encoder_active_port_id(struct device_node *node,
+						struct drm_encoder *encoder)
+{
+	struct of_endpoint endpoint;
+	int ret = drm_of_encoder_active_endpoint(node, encoder,
+						 &endpoint);
+
+	return ret ?: endpoint.port;
+}
+
 #endif /* __DRM_OF_H__ */
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
deleted file mode 100644
index cb65fa1..0000000
--- a/include/drm/exynos_drm.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* exynos_drm.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *	Inki Dae <inki.dae@samsung.com>
- *	Joonyoung Shim <jy0922.shim@samsung.com>
- *	Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#ifndef _EXYNOS_DRM_H_
-#define _EXYNOS_DRM_H_
-
-#include <uapi/drm/exynos_drm.h>
-#include <video/videomode.h>
-
-/**
- * A structure for lcd panel information.
- *
- * @timing: default video mode for initializing
- * @width_mm: physical size of lcd width.
- * @height_mm: physical size of lcd height.
- */
-struct exynos_drm_panel_info {
-	struct videomode vm;
-	u32 width_mm;
-	u32 height_mm;
-};
-
-/**
- * Platform Specific Structure for DRM based FIMD.
- *
- * @panel: default panel info for initializing
- * @default_win: default window layer number to be used for UI.
- * @bpp: default bit per pixel.
- */
-struct exynos_drm_fimd_pdata {
-	struct exynos_drm_panel_info panel;
-	u32				vidcon0;
-	u32				vidcon1;
-	unsigned int			default_win;
-	unsigned int			bpp;
-};
-
-/**
- * Platform Specific Structure for DRM based HDMI.
- *
- * @hdmi_dev: device point to specific hdmi driver.
- * @mixer_dev: device point to specific mixer driver.
- *
- * this structure is used for common hdmi driver and each device object
- * would be used to access specific device driver(hdmi or mixer driver)
- */
-struct exynos_drm_common_hdmi_pd {
-	struct device *hdmi_dev;
-	struct device *mixer_dev;
-};
-
-/**
- * Platform Specific Structure for DRM based HDMI core.
- *
- * @is_v13: set if hdmi version 13 is.
- * @cfg_hpd: function pointer to configure hdmi hotplug detection pin
- * @get_hpd: function pointer to get value of hdmi hotplug detection pin
- */
-struct exynos_drm_hdmi_pdata {
-	bool is_v13;
-	void (*cfg_hpd)(bool external);
-	int (*get_hpd)(void);
-};
-
-/**
- * Platform Specific Structure for DRM based IPP.
- *
- * @inv_pclk: if set 1. invert pixel clock
- * @inv_vsync: if set 1. invert vsync signal for wb
- * @inv_href: if set 1. invert href signal
- * @inv_hsync: if set 1. invert hsync signal for wb
- */
-struct exynos_drm_ipp_pol {
-	unsigned int inv_pclk;
-	unsigned int inv_vsync;
-	unsigned int inv_href;
-	unsigned int inv_hsync;
-};
-
-/**
- * Platform Specific Structure for DRM based FIMC.
- *
- * @pol: current hardware block polarity settings.
- * @clk_rate: current hardware clock rate.
- */
-struct exynos_drm_fimc_pdata {
-	struct exynos_drm_ipp_pol pol;
-	int clk_rate;
-};
-
-#endif	/* _EXYNOS_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index f970209..9094599 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -277,7 +277,9 @@
 	INTEL_VGA_DEVICE(0x191D, info)  /* WKS GT2 */
 
 #define INTEL_SKL_GT3_IDS(info) \
+	INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+	INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
 	INTEL_VGA_DEVICE(0x192A, info)  /* SRV GT3 */
 
@@ -296,7 +298,9 @@
 #define INTEL_BXT_IDS(info) \
 	INTEL_VGA_DEVICE(0x0A84, info), \
 	INTEL_VGA_DEVICE(0x1A84, info), \
-	INTEL_VGA_DEVICE(0x5A84, info)
+	INTEL_VGA_DEVICE(0x1A85, info), \
+	INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
+	INTEL_VGA_DEVICE(0x5A85, info)  /* APL HD Graphics 500 */
 
 #define INTEL_KBL_GT1_IDS(info)	\
 	INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
new file mode 100644
index 0000000..b2d32e0
--- /dev/null
+++ b/include/linux/apple-gmux.h
@@ -0,0 +1,50 @@
+/*
+ * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro
+ * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef LINUX_APPLE_GMUX_H
+#define LINUX_APPLE_GMUX_H
+
+#include <linux/acpi.h>
+
+#define GMUX_ACPI_HID "APP000B"
+
+#if IS_ENABLED(CONFIG_APPLE_GMUX)
+
+/**
+ * apple_gmux_present() - detect if gmux is built into the machine
+ *
+ * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
+ * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
+ *
+ * Return: %true if gmux is present and the kernel was configured
+ * with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_present(void)
+{
+	return acpi_dev_present(GMUX_ACPI_HID);
+}
+
+#else  /* !CONFIG_APPLE_GMUX */
+
+static inline bool apple_gmux_present(void)
+{
+	return false;
+}
+
+#endif /* !CONFIG_APPLE_GMUX */
+
+#endif /* LINUX_APPLE_GMUX_H */
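
A typical use of apple_gmux_present() is backing off in drivers whose function the gmux already provides; a hedged sketch of a hypothetical backlight driver's probe:

#include <linux/apple-gmux.h>
#include <linux/platform_device.h>

static int example_backlight_probe(struct platform_device *pdev)
{
	/* gmux owns backlight control on these machines; back off. */
	if (apple_gmux_present()) {
		dev_info(&pdev->dev, "backlight driven by gmux, aborting\n");
		return -ENODEV;
	}

	/* ... normal probe path ... */
	return 0;
}
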
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f98bd70..532108e 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -54,7 +54,7 @@
  * @release: release this buffer; to be called after the last dma_buf_put.
  * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
  * 		      caches and allocate backing storage (if not yet done)
- * 		      respectively pin the objet into memory.
+ * 		      and/or pin the object into memory.
  * @end_cpu_access: [optional] called after cpu access to flush caches.
  * @kmap_atomic: maps a page from the buffer into kernel address
  * 		 space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@
 	/* after final dma_buf_put() */
 	void (*release)(struct dma_buf *);
 
-	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
-				enum dma_data_direction);
-	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
-			       enum dma_data_direction);
+	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+	void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
 	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,9 +222,9 @@
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 				enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			    enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 69e1d4a1..b39a5f3 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -36,6 +36,26 @@
 struct pci_dev;
 
 /**
+ * enum vga_switcheroo_handler_flags_t - handler flags bitmask
+ * @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the
+ * 	DDC lines separately. This signals to clients that they should call
+ * 	drm_get_edid_switcheroo() to probe the EDID
+ * @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch
+ * 	the AUX channel separately. This signals to clients that the active
+ * 	GPU needs to train the link and communicate the link parameters to the
+ * 	inactive GPU (mediated by vga_switcheroo). The inactive GPU may then
+ * 	skip the AUX handshake and set up its output with these pre-calibrated
+ * 	values (DisplayPort specification v1.1a, section 2.5.3.3)
+ *
+ * Handler flags bitmask. Used by handlers to declare their capabilities upon
+ * registering with vga_switcheroo.
+ */
+enum vga_switcheroo_handler_flags_t {
+	VGA_SWITCHEROO_CAN_SWITCH_DDC	= (1 << 0),
+	VGA_SWITCHEROO_NEEDS_EDP_CONFIG	= (1 << 1),
+};
+
+/**
  * enum vga_switcheroo_state - client power state
  * @VGA_SWITCHEROO_OFF: off
  * @VGA_SWITCHEROO_ON: on
@@ -82,6 +102,9 @@
  * 	Mandatory. For muxless machines this should be a no-op. Returning 0
  * 	denotes success, anything else failure (in which case the switch is
  * 	aborted)
+ * @switch_ddc: switch DDC lines to given client.
+ * 	Optional. Should return the previous DDC owner on success or a
+ * 	negative int on failure
  * @power_state: cut or reinstate power of given client.
  * 	Optional. The return value is ignored
  * @get_client_id: determine if given pci device is integrated or discrete GPU.
@@ -93,6 +116,7 @@
 struct vga_switcheroo_handler {
 	int (*init)(void);
 	int (*switchto)(enum vga_switcheroo_client_id id);
+	int (*switch_ddc)(enum vga_switcheroo_client_id id);
 	int (*power_state)(enum vga_switcheroo_client_id id,
 			   enum vga_switcheroo_state state);
 	enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
@@ -132,8 +156,12 @@
 void vga_switcheroo_client_fb_set(struct pci_dev *dev,
 				  struct fb_info *info);
 
-int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler);
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+				    enum vga_switcheroo_handler_flags_t handler_flags);
 void vga_switcheroo_unregister_handler(void);
+enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void);
+int vga_switcheroo_lock_ddc(struct pci_dev *pdev);
+int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
 
 int vga_switcheroo_process_delayed_switch(void);
 
@@ -150,11 +178,15 @@
 static inline int vga_switcheroo_register_client(struct pci_dev *dev,
 		const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
 static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
-static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+		enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
 static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
 	const struct vga_switcheroo_client_ops *ops,
 	enum vga_switcheroo_client_id id) { return 0; }
 static inline void vga_switcheroo_unregister_handler(void) {}
+static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
+static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
+static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
 static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
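A client-side sketch of the new DDC locking, assuming the handler
advertises VGA_SWITCHEROO_CAN_SWITCH_DDC; in practice drivers reach this
through drm_get_edid_switcheroo(), and the probe function below is
illustrative only:

    #include <linux/errno.h>
    #include <linux/pci.h>
    #include <linux/vga_switcheroo.h>

    static int example_probe_edid(struct pci_dev *pdev)
    {
            int ret;

            if (!(vga_switcheroo_handler_flags() &
                  VGA_SWITCHEROO_CAN_SWITCH_DDC))
                    return -ENODEV; /* handler cannot mux DDC alone */

            ret = vga_switcheroo_lock_ddc(pdev);    /* DDC -> pdev */
            if (ret < 0)
                    return ret;

            /* ... i2c EDID transfer over the now-muxed DDC lines ... */

            ret = vga_switcheroo_unlock_ddc(pdev);  /* DDC -> prev owner */
            return ret < 0 ? ret : 0;
    }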
 
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index b4e92eb..a0ebfe7 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -669,6 +669,7 @@
 	__u64 value;
 };
 
+#define DRM_RDWR O_RDWR
 #define DRM_CLOEXEC O_CLOEXEC
 struct drm_prime_handle {
 	__u32 handle;
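DRM_RDWR maps directly onto O_RDWR for the exported fd, which is what lets
userspace mmap() the resulting dma-buf writable. A hedged libdrm sketch
(the function name is illustrative):

    #include <stdint.h>
    #include <xf86drm.h>

    /* export a GEM handle as a writable, close-on-exec dma-buf fd */
    static int example_export(int drm_fd, uint32_t handle)
    {
            int dmabuf_fd;

            if (drmPrimeHandleToFD(drm_fd, handle,
                                   DRM_CLOEXEC | DRM_RDWR, &dmabuf_fd))
                    return -1;

            return dmabuf_fd;   /* suitable for mmap(PROT_WRITE) */
    }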
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 50adb46..c021743 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -487,6 +487,21 @@
 	__u64 blue;
 };
 
+struct drm_color_ctm {
+	/* Conversion matrix in S31.32 format. */
+	__s64 matrix[9];
+};
+
+struct drm_color_lut {
+	/*
+	 * Data is U0.16 fixed point format.
+	 */
+	__u16 red;
+	__u16 green;
+	__u16 blue;
+	__u16 reserved;
+};
+
 #define DRM_MODE_PAGE_FLIP_EVENT 0x01
 #define DRM_MODE_PAGE_FLIP_ASYNC 0x02
 #define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
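Both structures use fixed-point encodings: S31.32 for the matrix entries
and U0.16 for the gamma samples. Two hedged helper sketches for building
the values, assuming a plain signed S31.32 encoding as the comment above
suggests:

    #include <linux/types.h>

    /* integer part goes in the upper 32 bits of the S31.32 value */
    static inline __s64 ctm_fixed_from_int(int v)
    {
            return (__s64)v << 32;
    }

    /* widen an 8-bit sample to U0.16 (0xff -> 0xffff) */
    static inline __u16 lut_from_u8(__u8 v)
    {
            return ((__u16)v << 8) | v;
    }

An identity CTM would then put ctm_fixed_from_int(1) in matrix[0],
matrix[4] and matrix[8], and zero everywhere else.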
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 312c67d..3947c2e 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -28,8 +28,21 @@
  */
 struct drm_exynos_gem_create {
 	__u64 size;
-	unsigned int flags;
-	unsigned int handle;
+	__u32 flags;
+	__u32 handle;
+};
+
+/**
+ * A structure for getting a fake-offset that can be used with mmap.
+ *
+ * @handle: handle of gem object.
+ * @reserved: just padding to be 64-bit aligned.
+ * @offset: a fake-offset of gem object.
+ */
+struct drm_exynos_gem_map {
+	__u32 handle;
+	__u32 reserved;
+	__u64 offset;
 };
 
 /**
@@ -42,8 +55,8 @@
  *	be set by driver.
  */
 struct drm_exynos_gem_info {
-	unsigned int handle;
-	unsigned int flags;
+	__u32 handle;
+	__u32 flags;
 	__u64 size;
 };
 
@@ -56,8 +69,8 @@
  * @edid: the edid data pointer from user side.
  */
 struct drm_exynos_vidi_connection {
-	unsigned int connection;
-	unsigned int extensions;
+	__u32 connection;
+	__u32 extensions;
 	__u64 edid;
 };
 
@@ -206,9 +219,9 @@
  * @pos: property of image position(src-cropped,dst-scaler).
  */
 struct drm_exynos_ipp_config {
-	enum drm_exynos_ops_id ops_id;
-	enum drm_exynos_flip	flip;
-	enum drm_exynos_degree	degree;
+	__u32 ops_id;
+	__u32 flip;
+	__u32 degree;
 	__u32	fmt;
 	struct drm_exynos_sz	sz;
 	struct drm_exynos_pos	pos;
@@ -233,7 +246,7 @@
  */
 struct drm_exynos_ipp_property {
 	struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
-	enum drm_exynos_ipp_cmd	cmd;
+	__u32	cmd;
 	__u32	ipp_id;
 	__u32	prop_id;
 	__u32	refresh_rate;
@@ -255,8 +268,8 @@
  * @user_data: user data.
  */
 struct drm_exynos_ipp_queue_buf {
-	enum drm_exynos_ops_id	ops_id;
-	enum drm_exynos_ipp_buf_type	buf_type;
+	__u32	ops_id;
+	__u32	buf_type;
 	__u32	prop_id;
 	__u32	buf_id;
 	__u32	handle[EXYNOS_DRM_PLANAR_MAX];
@@ -280,10 +293,11 @@
  */
 struct drm_exynos_ipp_cmd_ctrl {
 	__u32	prop_id;
-	enum drm_exynos_ipp_ctrl	ctrl;
+	__u32	ctrl;
 };
 
 #define DRM_EXYNOS_GEM_CREATE		0x00
+#define DRM_EXYNOS_GEM_MAP		0x01
 /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
 #define DRM_EXYNOS_GEM_GET		0x04
 #define DRM_EXYNOS_VIDI_CONNECTION	0x07
@@ -301,7 +315,8 @@
 
 #define DRM_IOCTL_EXYNOS_GEM_CREATE		DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
-
+#define DRM_IOCTL_EXYNOS_GEM_MAP		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_GEM_MAP, struct drm_exynos_gem_map)
 #define DRM_IOCTL_EXYNOS_GEM_GET	DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_GEM_GET,	struct drm_exynos_gem_info)
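Userspace consumes the new GEM_MAP ioctl in two steps: ask the kernel for
the fake offset, then hand that offset to mmap() on the DRM fd. A hedged
sketch (the wrapper function is illustrative):

    #include <stdint.h>
    #include <sys/mman.h>
    #include <xf86drm.h>
    #include <drm/exynos_drm.h>

    static void *example_map_gem(int fd, uint32_t handle, size_t size)
    {
            struct drm_exynos_gem_map req = { .handle = handle };

            if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MAP, &req))
                    return MAP_FAILED;

            /* the fake offset selects this object within the DRM fd */
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        fd, req.offset);
    }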
 
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index acf2102..a5524cc 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -772,10 +772,12 @@
 #define I915_EXEC_HANDLE_LUT		(1<<12)
 
 /** Used for switching BSD rings on the platforms with two BSD rings */
-#define I915_EXEC_BSD_MASK		(3<<13)
-#define I915_EXEC_BSD_DEFAULT		(0<<13) /* default ping-pong mode */
-#define I915_EXEC_BSD_RING1		(1<<13)
-#define I915_EXEC_BSD_RING2		(2<<13)
+#define I915_EXEC_BSD_SHIFT	 (13)
+#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
+/* default ping-pong mode */
+#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
 
 /** Tell the kernel that the batchbuffer is processed by
  *  the resource streamer.
@@ -812,10 +814,35 @@
 	/** Handle of the buffer to check for busy */
 	__u32 handle;
 
-	/** Return busy status (1 if busy, 0 if idle).
-	 * The high word is used to indicate on which rings the object
-	 * currently resides:
-	 *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+	/** Return busy status
+	 *
+	 * A return of 0 implies that the object is idle (after
+	 * having flushed any pending activity), and a non-zero return that
+	 * the object is still in-flight on the GPU. (The GPU has not yet
+	 * signaled completion for all pending requests that reference the
+	 * object.)
+	 *
+	 * The returned dword is split into two fields to indicate both
+	 * the engines on which the object is being read, and the
+	 * engine on which it is currently being written (if any).
+	 *
+	 * The low word (bits 0:15) indicates whether the object is being written
+	 * to by any engine (there can only be one, as the GEM implicit
+	 * synchronisation rules force writes to be serialised). Only the
+	 * engine for the last write is reported.
+	 *
+	 * The high word (bits 16:31) is a bitmask of the engines that are
+	 * currently reading from the object. Multiple engines may be
+	 * reading from the object simultaneously.
+	 *
+	 * The value of each engine is the same as specified in the
+	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
+	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
+	 * the I915_EXEC_RENDER engine for execution, and so it is never
+	 * reported as active itself. Some hardware may have parallel
+	 * execution engines, e.g. multiple media engines, which are
+	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
+	 * so are not separately reported for busyness.
 	 */
 	__u32 busy;
 };
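Decoding the new layout is a pair of shifts. A hedged userspace sketch of
the documented split (helper names are illustrative):

    #include <stdint.h>

    /* low word: EXECBUFFER2 value of the single writing engine, 0 if none */
    static inline uint16_t busy_write_engine(uint32_t busy)
    {
            return busy & 0xffff;
    }

    /* high word: bitmask of engines currently reading the object */
    static inline uint16_t busy_read_engines(uint32_t busy)
    {
            return busy >> 16;
    }

    /* busy == 0 means the object is idle on the GPU */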
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 81e6e0d..254d3e9 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -50,6 +50,8 @@
 #define MSM_PARAM_GPU_ID     0x01
 #define MSM_PARAM_GMEM_SIZE  0x02
 #define MSM_PARAM_CHIP_ID    0x03
+#define MSM_PARAM_MAX_FREQ   0x04
+#define MSM_PARAM_TIMESTAMP  0x05
 
 struct drm_msm_param {
 	__u32 pipe;           /* in, MSM_PIPE_x */
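Both new parameters are read through the existing GET_PARAM ioctl. A
hedged sketch of a timestamp query (the wrapper function is illustrative):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/msm_drm.h>

    static int example_gpu_timestamp(int fd, uint64_t *ts)
    {
            struct drm_msm_param req = {
                    .pipe  = MSM_PIPE_3D0,
                    .param = MSM_PARAM_TIMESTAMP,
            };
            int ret = drmIoctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req);

            if (!ret)
                    *ts = req.value;
            return ret;
    }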
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
new file mode 100644
index 0000000..fb0dedb
--- /dev/null
+++ b/include/uapi/linux/dma-buf.h
@@ -0,0 +1,40 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright (C) 2015 Intel Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DMA_BUF_UAPI_H_
+#define _DMA_BUF_UAPI_H_
+
+#include <linux/types.h>
+
+/* begin/end dma-buf functions used for userspace mmap. */
+struct dma_buf_sync {
+	__u64 flags;
+};
+
+#define DMA_BUF_SYNC_READ      (1 << 0)
+#define DMA_BUF_SYNC_WRITE     (2 << 0)
+#define DMA_BUF_SYNC_RW        (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
+#define DMA_BUF_SYNC_START     (0 << 2)
+#define DMA_BUF_SYNC_END       (1 << 2)
+#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
+	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
+
+#define DMA_BUF_BASE		'b'
+#define DMA_BUF_IOCTL_SYNC	_IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+#endif
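Userspace brackets its CPU access to an mmap()ed dma-buf with a START/END
pair, ORing in READ and/or WRITE to describe the access. A hedged sketch
(the function name is illustrative):

    #include <stddef.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/dma-buf.h>

    static int example_cpu_fill(int dmabuf_fd, void *map, size_t len)
    {
            struct dma_buf_sync sync = {
                    .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
            };

            if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
                    return -1;

            memset(map, 0, len);    /* CPU writes, caches now coherent */

            sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
            return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
    }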
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
index 69279c0..56830d1 100644
--- a/include/video/omap-panel-data.h
+++ b/include/video/omap-panel-data.h
@@ -45,21 +45,6 @@
 	int data_lines;
 };
 
-/**
- * encoder_tpd12s015 platform data
- * @name: name for this display entity
- * @ct_cp_hpd_gpio: CT_CP_HPD gpio number
- * @ls_oe_gpio: LS_OE gpio number
- * @hpd_gpio: HPD gpio number
- */
-struct encoder_tpd12s015_platform_data {
-	const char *name;
-	const char *source;
-
-	int ct_cp_hpd_gpio;
-	int ls_oe_gpio;
-	int hpd_gpio;
-};
 
 /**
  * connector_dvi platform data
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 295b41e..8e14ad7 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -51,6 +51,7 @@
 #define DISPC_IRQ_FRAMEDONEWB		(1 << 23)
 #define DISPC_IRQ_FRAMEDONETV		(1 << 24)
 #define DISPC_IRQ_WBBUFFEROVERFLOW	(1 << 25)
+#define DISPC_IRQ_WBUNCOMPLETEERROR	(1 << 26)
 #define DISPC_IRQ_SYNC_LOST3		(1 << 27)
 #define DISPC_IRQ_VSYNC3		(1 << 28)
 #define DISPC_IRQ_ACBIAS_COUNT_STAT3	(1 << 29)
@@ -331,8 +332,6 @@
 
 /* Init with the board info */
 extern int omap_display_init(struct omap_dss_board_info *board_data);
-/* HDMI mux init*/
-extern int omap_hdmi_init(enum omap_hdmi_flags flags);
 
 struct omap_video_timings {
 	/* Unit: pixels */
@@ -366,6 +365,8 @@
 	enum omap_dss_signal_level de_level;
 	/* Pixel clock edges to drive HSYNC and VSYNC signals */
 	enum omap_dss_signal_edge sync_pclk_edge;
+
+	bool double_pixel;
 };
 
 /* Hardcoded timings for tv modes. Venc only uses these to
@@ -769,6 +770,7 @@
 
 	/* DISPC channel for this output */
 	enum omap_channel dispc_channel;
+	bool dispc_channel_connected;
 
 	/* output instance */
 	enum omap_dss_output_id id;
@@ -782,13 +784,6 @@
 	struct omap_dss_device *dst;
 };
 
-struct omap_dss_hdmi_data
-{
-	int ct_cp_hpd_gpio;
-	int ls_oe_gpio;
-	int hpd_gpio;
-};
-
 struct omap_dss_driver {
 	int (*probe)(struct omap_dss_device *);
 	void (*remove)(struct omap_dss_device *);
@@ -897,85 +892,9 @@
 int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
 int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
 
-u32 dispc_read_irqstatus(void);
-void dispc_clear_irqstatus(u32 mask);
-u32 dispc_read_irqenable(void);
-void dispc_write_irqenable(u32 mask);
-
-int dispc_request_irq(irq_handler_t handler, void *dev_id);
-void dispc_free_irq(void *dev_id);
-
-int dispc_runtime_get(void);
-void dispc_runtime_put(void);
-
-void dispc_mgr_enable(enum omap_channel channel, bool enable);
-bool dispc_mgr_is_enabled(enum omap_channel channel);
-u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
-u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
-u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
-bool dispc_mgr_go_busy(enum omap_channel channel);
-void dispc_mgr_go(enum omap_channel channel);
-void dispc_mgr_set_lcd_config(enum omap_channel channel,
-		const struct dss_lcd_mgr_config *config);
-void dispc_mgr_set_timings(enum omap_channel channel,
-		const struct omap_video_timings *timings);
-void dispc_mgr_setup(enum omap_channel channel,
-		const struct omap_overlay_manager_info *info);
-
-int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
-		const struct omap_overlay_info *oi,
-		const struct omap_video_timings *timings,
-		int *x_predecim, int *y_predecim);
-
-int dispc_ovl_enable(enum omap_plane plane, bool enable);
-bool dispc_ovl_enabled(enum omap_plane plane);
-void dispc_ovl_set_channel_out(enum omap_plane plane,
-		enum omap_channel channel);
-int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
-		bool replication, const struct omap_video_timings *mgr_timings,
-		bool mem_to_mem);
-
 int omapdss_compat_init(void);
 void omapdss_compat_uninit(void);
 
-struct dss_mgr_ops {
-	int (*connect)(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dst);
-	void (*disconnect)(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dst);
-
-	void (*start_update)(struct omap_overlay_manager *mgr);
-	int (*enable)(struct omap_overlay_manager *mgr);
-	void (*disable)(struct omap_overlay_manager *mgr);
-	void (*set_timings)(struct omap_overlay_manager *mgr,
-			const struct omap_video_timings *timings);
-	void (*set_lcd_config)(struct omap_overlay_manager *mgr,
-			const struct dss_lcd_mgr_config *config);
-	int (*register_framedone_handler)(struct omap_overlay_manager *mgr,
-			void (*handler)(void *), void *data);
-	void (*unregister_framedone_handler)(struct omap_overlay_manager *mgr,
-			void (*handler)(void *), void *data);
-};
-
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
-void dss_uninstall_mgr_ops(void);
-
-int dss_mgr_connect(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dst);
-void dss_mgr_disconnect(struct omap_overlay_manager *mgr,
-		struct omap_dss_device *dst);
-void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
-		const struct omap_video_timings *timings);
-void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
-		const struct dss_lcd_mgr_config *config);
-int dss_mgr_enable(struct omap_overlay_manager *mgr);
-void dss_mgr_disable(struct omap_overlay_manager *mgr);
-void dss_mgr_start_update(struct omap_overlay_manager *mgr);
-int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr,
-		void (*handler)(void *), void *data);
-void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
-		void (*handler)(void *), void *data);
-
 static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
 {
 	return dssdev->src;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b86cf26..560ad38 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -48,6 +48,12 @@
 DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
 atomic_long_t nr_swap_pages;
+/*
+ * Some modules use swappable objects and may try to swap them out under
+ * memory pressure (via the shrinker). Before doing so, they may wish to
+ * check to see if any swap space is available.
+ */
+EXPORT_SYMBOL_GPL(nr_swap_pages);
 /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
 long total_swap_pages;
 static int least_priority;
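A hedged sketch of the intended use: a module shrinker that bails out early
when no swap is available (the count callback and the object counter are
hypothetical):

    #include <linux/shrinker.h>
    #include <linux/swap.h>

    static unsigned long example_shrink_count(struct shrinker *s,
                                              struct shrink_control *sc)
    {
            /* get_nr_swap_pages() reads the exported nr_swap_pages */
            if (get_nr_swap_pages() <= 0)
                    return 0;       /* swapping out gains nothing */

            return example_nr_swappable();  /* hypothetical object count */
    }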